diff --git a/dana/configs/db/Inference/infos/benchmarks.series.json b/dana/configs/db/Inference/infos/benchmarks.series.json
deleted file mode 100644
index a7088652f7941e3752553d359a5c013b4f03f803..0000000000000000000000000000000000000000
--- a/dana/configs/db/Inference/infos/benchmarks.series.json
+++ /dev/null
@@ -1 +0,0 @@
-{"pytorch_bert_0_forward_peak_memory_MB_":{"status":{"lastBuildId":14023,"status":"regression","current":{"average":553.6666666666666,"ratio":18.48621536351449,"diff":86.38305484057952},"base":{"average":467.2836118260871}},"state":"regressionNeedstriage"},"pytorch_bert_0_forward_latency_s_":{"status":{"lastBuildId":14023,"status":"similar","current":{"average":0.0035975,"ratio":-0.5234050957930142,"diff":-0.00001892857142857165},"base":{"average":0.0036164285714285717}},"state":"similarNeedstriage"},"pytorch_bert_0_forward_throughput_samples_s_":{"status":{"lastBuildId":14023,"status":"regression","current":{"average":1083.3333333333333,"ratio":296.46233607807255,"diff":810.0833333333333},"base":{"average":273.25}},"state":"regressionNeedstriage"},"pytorch_bert_1_forward_peak_memory_MB_":{"status":{"lastBuildId":14023,"status":"regression","current":{"average":551,"ratio":17.66560507389197,"diff":82.72382052173913},"base":{"average":468.2761794782609}},"state":"regressionNeedstriage"},"pytorch_bert_1_forward_latency_s_":{"status":{"lastBuildId":14023,"status":"regression","current":{"average":0.005028235294117648,"ratio":43.88150143073557,"diff":0.0015335294117647062},"base":{"average":0.0034947058823529414}},"state":"regressionNeedstriage"},"pytorch_bert_1_forward_throughput_samples_s_":{"status":{"lastBuildId":14023,"status":"improvement","current":{"average":782.7142857142857,"ratio":-31.763369963369964,"diff":-364.34453781512605},"base":{"average":1147.0588235294117}},"state":"improvementNeedstriage"},"pytorch_gpt2_0_forward_peak_memory_MB_":{"status":{"lastBuildId":14023,"status":"regression","current":{"average":555,"ratio":18.23252547688723,"diff":85.58602295652173},"base":{"average":469.41397704347827}},"state":"regressionNeedstriage"},"pytorch_gpt2_0_forward_latency_s_":{"status":{"lastBuildId":14023,"status":"improvement","current":{"average":0.0032655555555555554,"ratio":-13.988879133743058,"diff":-0.0005311111111111115},"base":{"average":0.003796666666666667}},"state":"improvementNeedstriage"},"pytorch_gpt2_0_forward_throughput_samples_s_":{"status":{"lastBuildId":14023,"status":"regression","current":{"average":608.1666666666666,"ratio":130.8032890575585,"diff":344.66666666666663},"base":{"average":263.5}},"state":"regressionNeedstriage"},"pytorch_gpt2_0_generate_latency_s_":{"status":{"lastBuildId":14023,"status":"improvement","current":{"average":0.30366666666666664,"ratio":-38.244900474517785,"diff":-0.1880606060606061},"base":{"average":0.49172727272727274}},"state":"improvementNeedstriage"},"pytorch_gpt2_0_generate_throughput_tokens_s_":{"status":{"lastBuildId":14023,"status":"regression","current":{"average":658.8333333333334,"ratio":231.18034899861192,"diff":459.8984220907298},"base":{"average":198.93491124260356}},"state":"regressionNeedstriage"},"llama_1gpu_0_forward_latency_s_":{"status":{"error":"Unable to find first average","lastBuildId":14023},"state":"similarNeedstriage"},"llama_1gpu_0_forward_throughput_samples_s_":{"status":{"error":"Unable to find first average","lastBuildId":14023},"state":"similarNeedstriage"},"llama_1gpu_0_generate_latency_s_":{"status":{"error":"Unable to find first 
average","lastBuildId":14023},"state":"similarNeedstriage"},"llama_1gpu_0_generate_throughput_tokens_s_":{"status":{"error":"Unable to find first average","lastBuildId":14023},"state":"similarNeedstriage"},"llama_1gpu_1_forward_latency_s_":{"status":{"lastBuildId":14023,"status":"similar","current":{"average":0.0032233333333333333,"ratio":0,"diff":0},"base":{"average":0.0032233333333333333}},"state":"similarNeedstriage"},"llama_1gpu_1_forward_throughput_samples_s_":{"status":{"lastBuildId":14023,"status":"similar","current":{"average":310.1666666666667,"ratio":0,"diff":0},"base":{"average":310.1666666666667}},"state":"similarNeedstriage"},"llama_1gpu_1_generate_latency_s_":{"status":{"lastBuildId":14023,"status":"similar","current":{"average":0.512,"ratio":0,"diff":0},"base":{"average":0.512}},"state":"similarNeedstriage"},"llama_1gpu_1_generate_throughput_tokens_s_":{"status":{"lastBuildId":14023,"status":"similar","current":{"average":390.8333333333333,"ratio":0,"diff":0},"base":{"average":390.8333333333333}},"state":"similarNeedstriage"},"llama_1gpu_2_forward_latency_s_":{"status":{"error":"Unable to find first average","lastBuildId":14023},"state":"similarNeedstriage"},"llama_1gpu_2_forward_throughput_samples_s_":{"status":{"error":"Unable to find first average","lastBuildId":14023},"state":"similarNeedstriage"},"llama_1gpu_2_generate_latency_s_":{"status":{"error":"Unable to find first average","lastBuildId":14023},"state":"similarNeedstriage"},"llama_1gpu_2_generate_throughput_tokens_s_":{"status":{"error":"Unable to find first average","lastBuildId":14023},"state":"similarNeedstriage"},"llama_1gpu_3_forward_latency_s_":{"status":{"error":"Unable to find first average","lastBuildId":14023},"state":"similarNeedstriage"},"llama_1gpu_3_forward_throughput_samples_s_":{"status":{"error":"Unable to find first average","lastBuildId":14023},"state":"similarNeedstriage"},"llama_1gpu_3_generate_latency_s_":{"status":{"error":"Unable to find first average","lastBuildId":14023},"state":"similarNeedstriage"},"llama_1gpu_3_generate_throughput_tokens_s_":{"status":{"error":"Unable to find first average","lastBuildId":14023},"state":"similarNeedstriage"},"pytorch_gpt2_0_generate_peak_memory_MB_":{"status":{"lastBuildId":14023,"status":"similar","current":{"average":559.3333333333334,"ratio":0,"diff":0},"base":{"average":559.3333333333334}},"state":"similarNeedstriage"}}
\ No newline at end of file
diff --git a/dana/configs/db/Inference/infos/benchmarks.statusSeries.json b/dana/configs/db/Inference/infos/benchmarks.statusSeries.json
deleted file mode 100644
index 73fbce71d9b80431a5a60635647a66a1a0c44b75..0000000000000000000000000000000000000000
--- a/dana/configs/db/Inference/infos/benchmarks.statusSeries.json
+++ /dev/null
@@ -1 +0,0 @@
-{"0":{"numSeries":11,"numSeriesSimilar":0,"numSeriesImproved":0,"numSeriesRegression":0,"numSeriesUndefined":11,"time":1692275225280},"1":{"numSeries":11,"numSeriesSimilar":0,"numSeriesImproved":0,"numSeriesRegression":0,"numSeriesUndefined":11,"time":1692275334598},"2":{"numSeries":11,"numSeriesSimilar":0,"numSeriesImproved":0,"numSeriesRegression":0,"numSeriesUndefined":11,"time":1692275426910},"3":{"numSeries":11,"numSeriesSimilar":0,"numSeriesImproved":0,"numSeriesRegression":0,"numSeriesUndefined":11,"time":1692283870417},"4":{"numSeries":11,"numSeriesSimilar":5,"numSeriesImproved":0,"numSeriesRegression":0,"numSeriesUndefined":6,"time":1692291095083},"5":{"numSeries":11,"numSeriesSimilar":5,"numSeriesImproved":0,"numSeriesRegression":0,"numSeriesUndefined":6,"time":1692291170786},"6":{"numSeries":11,"numSeriesSimilar":5,"numSeriesImproved":0,"numSeriesRegression":0,"numSeriesUndefined":6,"time":1692291266839},"7":{"numSeries":11,"numSeriesSimilar":5,"numSeriesImproved":0,"numSeriesRegression":0,"numSeriesUndefined":6,"time":1692291361418},"8":{"numSeries":11,"numSeriesSimilar":7,"numSeriesImproved":0,"numSeriesRegression":0,"numSeriesUndefined":4,"time":1692291469318},"9":{"numSeries":11,"numSeriesSimilar":7,"numSeriesImproved":0,"numSeriesRegression":0,"numSeriesUndefined":4,"time":1692291555219},"10":{"numSeries":11,"numSeriesSimilar":7,"numSeriesImproved":0,"numSeriesRegression":0,"numSeriesUndefined":4,"time":1692291642841},"11":{"numSeries":11,"numSeriesSimilar":7,"numSeriesImproved":0,"numSeriesRegression":0,"numSeriesUndefined":4,"time":1692291761275},"12":{"numSeries":11,"numSeriesSimilar":7,"numSeriesImproved":0,"numSeriesRegression":0,"numSeriesUndefined":4,"time":1692291847044},"13":{"numSeries":11,"numSeriesSimilar":7,"numSeriesImproved":0,"numSeriesRegression":0,"numSeriesUndefined":4,"time":1692298289286},"14":{"numSeries":11,"numSeriesSimilar":7,"numSeriesImproved":0,"numSeriesRegression":0,"numSeriesUndefined":4,"time":1692305466422},"15":{"numSeries":11,"numSeriesSimilar":7,"numSeriesImproved":0,"numSeriesRegression":0,"numSeriesUndefined":4,"time":1692341469380},"16":{"numSeries":11,"numSeriesSimilar":7,"numSeriesImproved":0,"numSeriesRegression":0,"numSeriesUndefined":4,"time":1692348689661},"17":{"numSeries":11,"numSeriesSimilar":7,"numSeriesImproved":0,"numSeriesRegression":0,"numSeriesUndefined":4,"time":1692348783633},"18":{"numSeries":11,"numSeriesSimilar":7,"numSeriesImproved":0,"numSeriesRegression":0,"numSeriesUndefined":4,"time":1692348884243},"19":{"numSeries":11,"numSeriesSimilar":7,"numSeriesImproved":0,"numSeriesRegression":0,"numSeriesUndefined":4,"time":1692355895529},"20":{"numSeries":11,"numSeriesSimilar":7,"numSeriesImproved":0,"numSeriesRegression":0,"numSeriesUndefined":4,"time":1692363509622},"21":{"numSeries":11,"numSeriesSimilar":7,"numSeriesImproved":0,"numSeriesRegression":0,"numSeriesUndefined":4,"time":1692363631569},"22":{"numSeries":11,"numSeriesSimilar":7,"numSeriesImproved":0,"numSeriesRegression":0,"numSeriesUndefined":4,"time":1692363715072},"23":{"numSeries":11,"numSeriesSimilar":7,"numSeriesImproved":0,"numSeriesRegression":0,"numSeriesUndefined":4,"time":1692370293052},"24":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":0,"numSeriesRegression":0,"numSeriesUndefined":2,"time":1692377474427},"25":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":0,"numSeriesRegression":0,"numSeriesUndefined":2,"time":1692384686807},"26":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":0,"numSeriesRegression":0,"numSeriesUnd
efined":2,"time":1692391888144},"27":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":0,"numSeriesRegression":0,"numSeriesUndefined":2,"time":1692399077563},"28":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":0,"numSeriesRegression":0,"numSeriesUndefined":2,"time":1692399184138},"29":{"numSeries":11,"numSeriesSimilar":11,"numSeriesImproved":0,"numSeriesRegression":0,"numSeriesUndefined":0,"time":1692399278969},"30":{"numSeries":11,"numSeriesSimilar":11,"numSeriesImproved":0,"numSeriesRegression":0,"numSeriesUndefined":0,"time":1692442288584},"31":{"numSeries":11,"numSeriesSimilar":11,"numSeriesImproved":0,"numSeriesRegression":0,"numSeriesUndefined":0,"time":1692607975003},"32":{"numSeries":11,"numSeriesSimilar":11,"numSeriesImproved":0,"numSeriesRegression":0,"numSeriesUndefined":0,"time":1692608012180},"33":{"numSeries":11,"numSeriesSimilar":11,"numSeriesImproved":0,"numSeriesRegression":0,"numSeriesUndefined":0,"time":1692615090962},"34":{"numSeries":11,"numSeriesSimilar":11,"numSeriesImproved":0,"numSeriesRegression":0,"numSeriesUndefined":0,"time":1692615226628},"35":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1692622811487},"36":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1692622988643},"37":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1692623080298},"38":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1692629657324},"39":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1692629869674},"40":{"numSeries":11,"numSeriesSimilar":11,"numSeriesImproved":0,"numSeriesRegression":0,"numSeriesUndefined":0,"time":1692636778944},"41":{"numSeries":11,"numSeriesSimilar":11,"numSeriesImproved":0,"numSeriesRegression":0,"numSeriesUndefined":0,"time":1692636794965},"42":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1692687080020},"43":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1692687165099},"44":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1692687263394},"45":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1692694301726},"46":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1692694390528},"47":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1692694482074},"48":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1692709210303},"49":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1692709292383},"50":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1692715904216},"51":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1692723107175},"52":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1692723182226},"53":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImpro
ved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1692730305120},"54":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1692744685215},"55":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1692754566210},"56":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1692773480229},"57":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1692773584829},"58":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1692773674528},"59":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1692787904252},"60":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1692795589959},"61":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1692795688941},"62":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1692795797943},"63":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1692802287312},"64":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1692809491018},"65":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1692816675679},"66":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1692816797813},"67":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1692816879790},"68":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1692859876617},"69":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1692859973988},"70":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1692860070150},"71":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1692867107900},"72":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1692874312279},"73":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1692874401502},"74":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1692874490574},"75":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1692882037829},"76":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1692882103701},"77":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1692888680135},"78":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1692888789636},"79":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1692895915410},"80":{"numSeries":11,
"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1692896011764},"81":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1692896104865},"82":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1692903093982},"83":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1692903195433},"84":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1692946310061},"85":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1692953507035},"86":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1692960712985},"87":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1692968384584},"88":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1692968469872},"89":{"numSeries":11,"numSeriesSimilar":11,"numSeriesImproved":0,"numSeriesRegression":0,"numSeriesUndefined":0,"time":1692968582036},"90":{"numSeries":11,"numSeriesSimilar":11,"numSeriesImproved":0,"numSeriesRegression":0,"numSeriesUndefined":0,"time":1692968665033},"91":{"numSeries":11,"numSeriesSimilar":11,"numSeriesImproved":0,"numSeriesRegression":0,"numSeriesUndefined":0,"time":1692968765672},"92":{"numSeries":11,"numSeriesSimilar":11,"numSeriesImproved":0,"numSeriesRegression":0,"numSeriesUndefined":0,"time":1692975082869},"93":{"numSeries":11,"numSeriesSimilar":11,"numSeriesImproved":0,"numSeriesRegression":0,"numSeriesUndefined":0,"time":1692975186133},"94":{"numSeries":11,"numSeriesSimilar":11,"numSeriesImproved":0,"numSeriesRegression":0,"numSeriesUndefined":0,"time":1692975266822},"95":{"numSeries":11,"numSeriesSimilar":11,"numSeriesImproved":0,"numSeriesRegression":0,"numSeriesUndefined":0,"time":1692982295850},"96":{"numSeries":11,"numSeriesSimilar":11,"numSeriesImproved":0,"numSeriesRegression":0,"numSeriesUndefined":0,"time":1692982405237},"97":{"numSeries":11,"numSeriesSimilar":11,"numSeriesImproved":0,"numSeriesRegression":0,"numSeriesUndefined":0,"time":1692982504068},"98":{"numSeries":11,"numSeriesSimilar":11,"numSeriesImproved":0,"numSeriesRegression":0,"numSeriesUndefined":0,"time":1692989491321},"99":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1692989589880},"100":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1693083084307},"101":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1693219940008},"102":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1693234273777},"103":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1693234385418},"104":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1693234482394},"105":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1693234585637},"106":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUnd
efined":0,"time":1693299129841},"107":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1693299205297},"108":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1693299312270},"109":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1693306315710},"110":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1693306413663},"111":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1693314058834},"112":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1693314152838},"113":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1693314250444},"114":{"numSeries":11,"numSeriesSimilar":10,"numSeriesImproved":0,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1693320686908},"115":{"numSeries":11,"numSeriesSimilar":10,"numSeriesImproved":0,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1693320777528},"116":{"numSeries":11,"numSeriesSimilar":10,"numSeriesImproved":0,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1693320872841},"117":{"numSeries":11,"numSeriesSimilar":10,"numSeriesImproved":0,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1693320993813},"118":{"numSeries":11,"numSeriesSimilar":10,"numSeriesImproved":0,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1693321075430},"119":{"numSeries":11,"numSeriesSimilar":10,"numSeriesImproved":0,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1693321199703},"120":{"numSeries":11,"numSeriesSimilar":10,"numSeriesImproved":0,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1693327916799},"121":{"numSeries":11,"numSeriesSimilar":10,"numSeriesImproved":0,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1693328042125},"122":{"numSeries":11,"numSeriesSimilar":10,"numSeriesImproved":0,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1693328154781},"123":{"numSeries":11,"numSeriesSimilar":10,"numSeriesImproved":0,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1693328252544},"124":{"numSeries":11,"numSeriesSimilar":10,"numSeriesImproved":0,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1693335091268},"125":{"numSeries":11,"numSeriesSimilar":10,"numSeriesImproved":0,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1693335205944},"126":{"numSeries":11,"numSeriesSimilar":10,"numSeriesImproved":0,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1693335306508},"127":{"numSeries":11,"numSeriesSimilar":10,"numSeriesImproved":0,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1693335395421},"128":{"numSeries":11,"numSeriesSimilar":10,"numSeriesImproved":0,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1693342294274},"129":{"numSeries":11,"numSeriesSimilar":10,"numSeriesImproved":0,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1693342413408},"130":{"numSeries":11,"numSeriesSimilar":10,"numSeriesImproved":0,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1693392694805},"131":{"numSeries":11,"numSeriesSimilar":10,"numSeriesImproved":0,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1693400430600},"132":{"numSeries":11,"numSeriesSimilar":10,"numSeriesImproved":0,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1693407090767},"133":{"numSeries":1
1,"numSeriesSimilar":10,"numSeriesImproved":0,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1693407190789},"134":{"numSeries":11,"numSeriesSimilar":10,"numSeriesImproved":0,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1693407300822},"135":{"numSeries":11,"numSeriesSimilar":10,"numSeriesImproved":0,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1693407412832},"136":{"numSeries":11,"numSeriesSimilar":10,"numSeriesImproved":0,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1693414306113},"137":{"numSeries":11,"numSeriesSimilar":10,"numSeriesImproved":0,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1693414394949},"138":{"numSeries":11,"numSeriesSimilar":10,"numSeriesImproved":0,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1693414512906},"139":{"numSeries":11,"numSeriesSimilar":10,"numSeriesImproved":0,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1693414613779},"140":{"numSeries":11,"numSeriesSimilar":10,"numSeriesImproved":0,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1693435928079},"141":{"numSeries":11,"numSeriesSimilar":10,"numSeriesImproved":0,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1693479105303},"142":{"numSeries":11,"numSeriesSimilar":10,"numSeriesImproved":0,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1693479196462},"143":{"numSeries":11,"numSeriesSimilar":10,"numSeriesImproved":0,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1693479306297},"144":{"numSeries":11,"numSeriesSimilar":10,"numSeriesImproved":0,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1693486850169},"145":{"numSeries":11,"numSeriesSimilar":10,"numSeriesImproved":0,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1693493490652},"146":{"numSeries":11,"numSeriesSimilar":10,"numSeriesImproved":0,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1693493598993},"147":{"numSeries":11,"numSeriesSimilar":10,"numSeriesImproved":0,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1693500677787},"148":{"numSeries":11,"numSeriesSimilar":10,"numSeriesImproved":0,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1693500791204},"149":{"numSeries":11,"numSeriesSimilar":10,"numSeriesImproved":0,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1693507927925},"150":{"numSeries":11,"numSeriesSimilar":10,"numSeriesImproved":0,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1693508010465},"151":{"numSeries":11,"numSeriesSimilar":10,"numSeriesImproved":0,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1693522311897},"152":{"numSeries":11,"numSeriesSimilar":10,"numSeriesImproved":0,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1693565500721},"153":{"numSeries":11,"numSeriesSimilar":10,"numSeriesImproved":0,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1693565598090},"154":{"numSeries":11,"numSeriesSimilar":10,"numSeriesImproved":0,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1693573227358},"155":{"numSeries":11,"numSeriesSimilar":7,"numSeriesImproved":2,"numSeriesRegression":2,"numSeriesUndefined":0,"time":1693579962471},"156":{"numSeries":11,"numSeriesSimilar":7,"numSeriesImproved":2,"numSeriesRegression":2,"numSeriesUndefined":0,"time":1693580079909},"157":{"numSeries":11,"numSeriesSimilar":7,"numSeriesImproved":2,"numSeriesRegression":2,"numSeriesUndefined":0,"time":1693587103524},"158":{"numSeries":11,"numSeriesSimilar":7,"numSeriesImproved":2,"numSeriesRegression":2,"numSeriesUndefined":0,"time":1693587207692},"159":{"numSeries":11,"numSeriesSimilar":7,"numSeriesImproved":2,"num
SeriesRegression":2,"numSeriesUndefined":0,"time":1693587304330},"160":{"numSeries":11,"numSeriesSimilar":7,"numSeriesImproved":2,"numSeriesRegression":2,"numSeriesUndefined":0,"time":1693594289795},"161":{"numSeries":11,"numSeriesSimilar":7,"numSeriesImproved":2,"numSeriesRegression":2,"numSeriesUndefined":0,"time":1693594403166},"162":{"numSeries":11,"numSeriesSimilar":7,"numSeriesImproved":2,"numSeriesRegression":2,"numSeriesUndefined":0,"time":1693601497100},"163":{"numSeries":11,"numSeriesSimilar":7,"numSeriesImproved":2,"numSeriesRegression":2,"numSeriesUndefined":0,"time":1693817515961},"164":{"numSeries":11,"numSeriesSimilar":7,"numSeriesImproved":2,"numSeriesRegression":2,"numSeriesUndefined":0,"time":1693824694568},"165":{"numSeries":11,"numSeriesSimilar":7,"numSeriesImproved":2,"numSeriesRegression":2,"numSeriesUndefined":0,"time":1693824804810},"166":{"numSeries":11,"numSeriesSimilar":7,"numSeriesImproved":2,"numSeriesRegression":2,"numSeriesUndefined":0,"time":1693824914923},"167":{"numSeries":11,"numSeriesSimilar":7,"numSeriesImproved":2,"numSeriesRegression":2,"numSeriesUndefined":0,"time":1693825010399},"168":{"numSeries":11,"numSeriesSimilar":7,"numSeriesImproved":2,"numSeriesRegression":2,"numSeriesUndefined":0,"time":1693825107426},"169":{"numSeries":11,"numSeriesSimilar":7,"numSeriesImproved":2,"numSeriesRegression":2,"numSeriesUndefined":0,"time":1693832453000},"170":{"numSeries":11,"numSeriesSimilar":7,"numSeriesImproved":2,"numSeriesRegression":2,"numSeriesUndefined":0,"time":1693832538550},"171":{"numSeries":11,"numSeriesSimilar":7,"numSeriesImproved":2,"numSeriesRegression":2,"numSeriesUndefined":0,"time":1693839105028},"172":{"numSeries":11,"numSeriesSimilar":7,"numSeriesImproved":2,"numSeriesRegression":2,"numSeriesUndefined":0,"time":1693839200304},"173":{"numSeries":11,"numSeriesSimilar":7,"numSeriesImproved":2,"numSeriesRegression":2,"numSeriesUndefined":0,"time":1693846289755},"174":{"numSeries":11,"numSeriesSimilar":7,"numSeriesImproved":2,"numSeriesRegression":2,"numSeriesUndefined":0,"time":1693846400893},"175":{"numSeries":11,"numSeriesSimilar":7,"numSeriesImproved":2,"numSeriesRegression":2,"numSeriesUndefined":0,"time":1693853503270},"176":{"numSeries":11,"numSeriesSimilar":7,"numSeriesImproved":2,"numSeriesRegression":2,"numSeriesUndefined":0,"time":1693853602220},"177":{"numSeries":11,"numSeriesSimilar":7,"numSeriesImproved":2,"numSeriesRegression":2,"numSeriesUndefined":0,"time":1693853717882},"178":{"numSeries":11,"numSeriesSimilar":7,"numSeriesImproved":2,"numSeriesRegression":2,"numSeriesUndefined":0,"time":1693853800539},"179":{"numSeries":11,"numSeriesSimilar":7,"numSeriesImproved":2,"numSeriesRegression":2,"numSeriesUndefined":0,"time":1693853899680},"180":{"numSeries":11,"numSeriesSimilar":7,"numSeriesImproved":2,"numSeriesRegression":2,"numSeriesUndefined":0,"time":1693860690662},"181":{"numSeries":11,"numSeriesSimilar":7,"numSeriesImproved":2,"numSeriesRegression":2,"numSeriesUndefined":0,"time":1693903890544},"182":{"numSeries":11,"numSeriesSimilar":7,"numSeriesImproved":2,"numSeriesRegression":2,"numSeriesUndefined":0,"time":1693911085343},"183":{"numSeries":11,"numSeriesSimilar":7,"numSeriesImproved":2,"numSeriesRegression":2,"numSeriesUndefined":0,"time":1693911180504},"184":{"numSeries":11,"numSeriesSimilar":7,"numSeriesImproved":2,"numSeriesRegression":2,"numSeriesUndefined":0,"time":1693911295592},"185":{"numSeries":11,"numSeriesSimilar":8,"numSeriesImproved":2,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1693911402087},"186":
{"numSeries":11,"numSeriesSimilar":8,"numSeriesImproved":2,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1693911485157},"187":{"numSeries":11,"numSeriesSimilar":6,"numSeriesImproved":3,"numSeriesRegression":2,"numSeriesUndefined":0,"time":1693918943947},"188":{"numSeries":11,"numSeriesSimilar":6,"numSeriesImproved":3,"numSeriesRegression":2,"numSeriesUndefined":0,"time":1693919041995},"189":{"numSeries":11,"numSeriesSimilar":6,"numSeriesImproved":3,"numSeriesRegression":2,"numSeriesUndefined":0,"time":1693919149208},"190":{"numSeries":11,"numSeriesSimilar":6,"numSeriesImproved":3,"numSeriesRegression":2,"numSeriesUndefined":0,"time":1693919245206},"191":{"numSeries":11,"numSeriesSimilar":6,"numSeriesImproved":3,"numSeriesRegression":2,"numSeriesUndefined":0,"time":1693925476024},"192":{"numSeries":11,"numSeriesSimilar":6,"numSeriesImproved":3,"numSeriesRegression":2,"numSeriesUndefined":0,"time":1693925588010},"193":{"numSeries":11,"numSeriesSimilar":6,"numSeriesImproved":3,"numSeriesRegression":2,"numSeriesUndefined":0,"time":1693925693221},"194":{"numSeries":11,"numSeriesSimilar":7,"numSeriesImproved":2,"numSeriesRegression":2,"numSeriesUndefined":0,"time":1693932701016},"195":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1693932807865},"196":{"numSeries":11,"numSeriesSimilar":8,"numSeriesImproved":1,"numSeriesRegression":2,"numSeriesUndefined":0,"time":1693939887235},"197":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1693940004131},"198":{"numSeries":11,"numSeriesSimilar":8,"numSeriesImproved":1,"numSeriesRegression":2,"numSeriesUndefined":0,"time":1693940092448},"199":{"numSeries":11,"numSeriesSimilar":8,"numSeriesImproved":1,"numSeriesRegression":2,"numSeriesUndefined":0,"time":1693940205179},"200":{"numSeries":11,"numSeriesSimilar":8,"numSeriesImproved":1,"numSeriesRegression":2,"numSeriesUndefined":0,"time":1693947107836},"201":{"numSeries":11,"numSeriesSimilar":8,"numSeriesImproved":1,"numSeriesRegression":2,"numSeriesUndefined":0,"time":1693947234096},"202":{"numSeries":11,"numSeriesSimilar":8,"numSeriesImproved":1,"numSeriesRegression":2,"numSeriesUndefined":0,"time":1693947328697},"203":{"numSeries":11,"numSeriesSimilar":8,"numSeriesImproved":1,"numSeriesRegression":2,"numSeriesUndefined":0,"time":1693997481889},"204":{"numSeries":11,"numSeriesSimilar":8,"numSeriesImproved":1,"numSeriesRegression":2,"numSeriesUndefined":0,"time":1694005271377},"205":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1694005370468},"206":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1694011879567},"207":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1694011976935},"208":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1694019091997},"209":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1694019198520},"210":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1694062289035},"211":{"numSeries":11,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRegression":1,"numSeriesUndefined":0,"time":1695223133570},"212":{"numSeries":28,"numSeriesSimilar":9,"numSeriesImproved":1,"numSeriesRe
gression":1,"numSeriesUndefined":17,"time":1695223623818},"213":{"numSeries":28,"numSeriesSimilar":8,"numSeriesImproved":2,"numSeriesRegression":1,"numSeriesUndefined":17,"time":1695224061439},"214":{"numSeries":28,"numSeriesSimilar":8,"numSeriesImproved":2,"numSeriesRegression":1,"numSeriesUndefined":17,"time":1695226035840},"215":{"numSeries":28,"numSeriesSimilar":8,"numSeriesImproved":2,"numSeriesRegression":1,"numSeriesUndefined":17,"time":1695226444987},"216":{"numSeries":28,"numSeriesSimilar":6,"numSeriesImproved":3,"numSeriesRegression":7,"numSeriesUndefined":12,"time":1695236320342}}
\ No newline at end of file
diff --git a/dana/configs/db/Inference/infos/builds.json b/dana/configs/db/Inference/infos/builds.json
deleted file mode 100644
index c3e6474430865dd0b4d58d3c682fcb515741c0fb..0000000000000000000000000000000000000000
--- a/dana/configs/db/Inference/infos/builds.json
+++ /dev/null
@@ -1 +0,0 @@
-{"13713":{"buildId":13713,"infos":{"hash":"e50c9253f3a38d9db56c02d3d8d04e2f20070de8","abbrevHash":"e50c9253","authorName":"amyeroberts","authorEmail":"22614925+amyeroberts@users.noreply.github.com","subject":"YOLOS - reset default return_pixel_mask value (#25559)","url":null}},"13714":{"buildId":13714,"infos":{"hash":"8992589dd6fcfdfc5f1435a4a9a92da501dd5ab6","abbrevHash":"8992589d","authorName":"Yih-Dar","authorEmail":"2521628+ydshieh@users.noreply.github.com","subject":"Skip `test_onnx_runtime_optimize` for now (#25560)","url":null}},"13715":{"buildId":13715,"infos":{"hash":"e7e9261a202dd5623f488f1cb05007e88629f275","abbrevHash":"e7e9261a","authorName":"Younes Belkada","authorEmail":"49240599+younesbelkada@users.noreply.github.com","subject":"[`Docs`] Fix un-rendered images (#25561)","url":null}},"13716":{"buildId":13716,"infos":{"hash":"1791ef8df647a38b4fcb96c14ddd83a43861d713","abbrevHash":"1791ef8d","authorName":"Alex McKinney","authorEmail":"44398246+vvvm23@users.noreply.github.com","subject":"Adds `TRANSFORMERS_TEST_DEVICE` (#25506)","url":null}},"13717":{"buildId":13717,"infos":{"hash":"d2871b29754abd0f72cf42c299bb1c041519f7bc","abbrevHash":"d2871b29","authorName":"Yih-Dar","authorEmail":"2521628+ydshieh@users.noreply.github.com","subject":"Skip `test_beam_search_xla_generate_simple` for `T5` (#25566)","url":null}},"13718":{"buildId":13718,"infos":{"hash":"d6bf08f7f6f8bf5d6d94e9b10b7d8203906353ad","abbrevHash":"d6bf08f7","authorName":"Arthur","authorEmail":"48595927+ArthurZucker@users.noreply.github.com","subject":"[`resize_embedding`] Introduce `pad_to_multiple_of` and guidance (#25088)","url":null}},"13719":{"buildId":13719,"infos":{"hash":"5347d00092c4f2429389269dd912417e8daff848","abbrevHash":"5347d000","authorName":"Arthur","authorEmail":"48595927+ArthurZucker@users.noreply.github.com","subject":"[`SwitchTransformers`] Remove unused module (#25427)","url":null}},"13720":{"buildId":13720,"infos":{"hash":"b4d554880013bf97718e1e1332715eeaba7dee17","abbrevHash":"b4d55488","authorName":"Arthur","authorEmail":"48595927+ArthurZucker@users.noreply.github.com","subject":"🚨🚨🚨 [`SPM`] Finish fix spm models 🚨🚨🚨 (#25224)","url":null}},"13721":{"buildId":13721,"infos":{"hash":"9264fc915a3295c6fd0e05f54ee409917ac43f60","abbrevHash":"9264fc91","authorName":"Sina","authorEmail":"sina.moeini@gmail.com","subject":"Inconsistency in PreTrainedModel.resize_token_embeddings When ZeRO3 Is Enabled 
(#25394)","url":null}},"13722":{"buildId":13722,"infos":{"hash":"181d778f83bf6e58c1d69a7599afb2bb9ceff21e","abbrevHash":"181d778f","authorName":"Arthur","authorEmail":"48595927+ArthurZucker@users.noreply.github.com","subject":"[`NllbMoe`] Update code to properly support loss computation (#25429)","url":null}},"13723":{"buildId":13723,"infos":{"hash":"d4c0aa1443557981a0690c0593be7b0f6ffd53cf","abbrevHash":"d4c0aa14","authorName":"Younes Belkada","authorEmail":"49240599+younesbelkada@users.noreply.github.com","subject":"[`Tests`] Fix failing 8bit test (#25564)","url":null}},"13724":{"buildId":13724,"infos":{"hash":"4e1dee0e8e06c1146d023c43812b88bfe2763329","abbrevHash":"4e1dee0e","authorName":"Marc Sun","authorEmail":"57196510+SunMarc@users.noreply.github.com","subject":"Revert \"change version (#25387)\" (#25573)","url":null}},"13725":{"buildId":13725,"infos":{"hash":"c4c0ceff096473cb4e47ef2f067640bcdf0b32e0","abbrevHash":"c4c0ceff","authorName":"Sourab Mangrulkar","authorEmail":"13534540+pacman100@users.noreply.github.com","subject":"add util for ram efficient loading of model when using fsdp (#25107)","url":null}},"13726":{"buildId":13726,"infos":{"hash":"b8f69d0d10e74c3718e5c79891fdc2a5ac8887d0","abbrevHash":"b8f69d0d","authorName":"Yoach Lacombe","authorEmail":"52246514+ylacombe@users.noreply.github.com","subject":"Add Text-To-Speech pipeline (#24952)","url":null}},"13727":{"buildId":13727,"infos":{"hash":"427adc898ab49c321d58ff4011fa54133adf62c2","abbrevHash":"427adc89","authorName":"Yih-Dar","authorEmail":"2521628+ydshieh@users.noreply.github.com","subject":"Skip `test_contrastive_generate` for `TFXLNet` (#25574)","url":null}},"13728":{"buildId":13728,"infos":{"hash":"4a27c13f1eee26393d60d381e500e1a61970e8ee","abbrevHash":"4a27c13f","authorName":"Marc Sun","authorEmail":"57196510+SunMarc@users.noreply.github.com","subject":"add warning for 8bit optimizers (#25575)","url":null}},"13729":{"buildId":13729,"infos":{"hash":"659ab0423e6492b079d3df131445a39dda0651cb","abbrevHash":"659ab042","authorName":"Amélie T. 
Reymond","authorEmail":"amelietamrey@gmail.com","subject":"Fix typo in example code (#25583)","url":null}},"13730":{"buildId":13730,"infos":{"hash":"08e32519f8eeeb2e08a0ed8626a5f97b766bb2e8","abbrevHash":"08e32519","authorName":"Kihoon Son","authorEmail":"75935546+kihoon71@users.noreply.github.com","subject":"Suggestions on Pipeline_webserver (#25570)","url":null}},"13731":{"buildId":13731,"infos":{"hash":"940d1a76b0f2ebf98b18326bffc7e4bfc8c416d7","abbrevHash":"940d1a76","authorName":"Younes Belkada","authorEmail":"49240599+younesbelkada@users.noreply.github.com","subject":"[`Docs` / `BetterTransformer` ] Added more details about flash attention + SDPA (#25265)","url":null}},"13732":{"buildId":13732,"infos":{"hash":"c45aab75356563dbb8124aafbc2699853e177873","abbrevHash":"c45aab75","authorName":"Martin Malmsten","authorEmail":"martin@martinmalmsten.net","subject":"Added missing parenthesis in call to is_fsdp_enabled (#25585)","url":null}},"13733":{"buildId":13733,"infos":{"hash":"9d7afd2536ecd9816dd2ea9592a01e52fec17d17","abbrevHash":"9d7afd25","authorName":"Alex McKinney","authorEmail":"44398246+vvvm23@users.noreply.github.com","subject":"Replaces calls to `.cuda` with `.to(torch_device)` in tests (#25571)","url":null}},"13734":{"buildId":13734,"infos":{"hash":"30b3c46ff5a7c9761a800a9ab4bcf8cdb206727e","abbrevHash":"30b3c46f","authorName":"Arthur","authorEmail":"48595927+ArthurZucker@users.noreply.github.com","subject":"[`split_special_tokens`] Add support for `split_special_tokens` argument to encode (#25081)","url":null}},"13735":{"buildId":13735,"infos":{"hash":"bc3e20dcf08a03e22a0e4a42a0ce5a8ec94180e5","abbrevHash":"bc3e20dc","authorName":"Arthur","authorEmail":"48595927+ArthurZucker@users.noreply.github.com","subject":"[`Llama`] remove prompt and fix prefix finetuning (#25565)","url":null}},"13736":{"buildId":13736,"infos":{"hash":"8d2f953f4a59a6a6f337a75ef75bb8a78260ef73","abbrevHash":"8d2f953f","authorName":"Kashif Rasul","authorEmail":"kashif.rasul@gmail.com","subject":"[Time series Informer] fix dtype of cumsum (#25431)","url":null}},"13737":{"buildId":13737,"infos":{"hash":"636acc75b089aa3ce14b48ed3d9d6555565d1a6d","abbrevHash":"636acc75","authorName":"Sourab Mangrulkar","authorEmail":"13534540+pacman100@users.noreply.github.com","subject":"fix z3 init when using accelerate launcher (#25589)","url":null}},"13738":{"buildId":13738,"infos":{"hash":"ef1534252f76231b4a6403c71866d4376e35292d","abbrevHash":"ef153425","authorName":"Arthur","authorEmail":"48595927+ArthurZucker@users.noreply.github.com","subject":"[`TokenizerFast`] Fix setting prefix space in __init__ (#25563)","url":null}},"13739":{"buildId":13739,"infos":{"hash":"faed2ca46fb163082d154aa234fd5d30682d6bf1","abbrevHash":"faed2ca4","authorName":"Younes Belkada","authorEmail":"49240599+younesbelkada@users.noreply.github.com","subject":"[`PEFT`] Peft integration alternative design (#25077)","url":null}},"13740":{"buildId":13740,"infos":{"hash":"6f4424bb086d3d090855862be5aff64eb8ed7101","abbrevHash":"6f4424bb","authorName":"Omar Sanseviero","authorEmail":"osanseviero@gmail.com","subject":"Make TTS automodels importable (#25595)","url":null}},"13741":{"buildId":13741,"infos":{"hash":"4d64157ed3795090110dd8aceb9b7a5ff78bb247","abbrevHash":"4d64157e","authorName":"Hyeonseo Yun","authorEmail":"0525yhs@gmail.com","subject":"🌐 [i18n-KO] Translated `perf_train_tpu_tf.md` to Korean (#25433)","url":null}},"13742":{"buildId":13742,"infos":{"hash":"6c811a322f06a4ce0f3dcf1d6be9e334ee69d8f7","abbrevHash":"6c811a32","authorName":"Stas 
Bekman","authorEmail":"stas00@users.noreply.github.com","subject":"new model: IDEFICS via HuggingFaceM4 (#24796)","url":null}},"13743":{"buildId":13743,"infos":{"hash":"6b82d936d49956ba7b43c5ee590f4868de373b65","abbrevHash":"6b82d936","authorName":"Marc Sun","authorEmail":"57196510+SunMarc@users.noreply.github.com","subject":"reattach hooks when using `resize_token_embeddings` (#25596)","url":null}},"13744":{"buildId":13744,"infos":{"hash":"1982dd3b15867c46e1c20645901b0de469fd935f","abbrevHash":"1982dd3b","authorName":"ydshieh","authorEmail":"ydshieh@users.noreply.github.com","subject":"Hotfix","url":null}},"13745":{"buildId":13745,"infos":{"hash":"f92cc7034a49959b247a46a210b912e56a6f977d","abbrevHash":"f92cc703","authorName":"Sylvain Gugger","authorEmail":"35901082+sgugger@users.noreply.github.com","subject":"Ignore all exceptions from signal in dynamic code (#25623)","url":null}},"13746":{"buildId":13746,"infos":{"hash":"9627c3da4af317a5cf75e6dbdca7e7f94d08e4b0","abbrevHash":"9627c3da","authorName":"Younes Belkada","authorEmail":"49240599+younesbelkada@users.noreply.github.com","subject":"Fix PEFT integration failures on nightly CI (#25624)","url":null}},"13747":{"buildId":13747,"infos":{"hash":"f09db47a71ddef60ccc120b953ee32326c9253a3","abbrevHash":"f09db47a","authorName":"Yih-Dar","authorEmail":"2521628+ydshieh@users.noreply.github.com","subject":"Run doctest for new files (#25588)","url":null}},"13748":{"buildId":13748,"infos":{"hash":"2f8acfea1ca11fe3479fb379ccbded516d0cff57","abbrevHash":"2f8acfea","authorName":"Francisco Kurucz","authorEmail":"juanfkurucz@gmail.com","subject":"Fix test_modeling_mpt typo in model id (#25606)","url":null}},"13749":{"buildId":13749,"infos":{"hash":"5c67682b169576c4859700d551090ff79d450a9a","abbrevHash":"5c67682b","authorName":"Sylvain Gugger","authorEmail":"Sylvain.gugger@gmail.com","subject":"v4.33.0.dev0","url":null}},"13750":{"buildId":13750,"infos":{"hash":"e769ca3d287274143501b2803275367b2bff3e6a","abbrevHash":"e769ca3d","authorName":"Pranith Pashikanti","authorEmail":"117859007+pranith7@users.noreply.github.com","subject":"Added paper links in logitprocess.py (#25482)","url":null}},"13752":{"buildId":13752,"infos":{"hash":"2582bbde2ed3ee1b25c5886df35c07376ee930c4","abbrevHash":"2582bbde","authorName":"Arthur","authorEmail":"48595927+ArthurZucker@users.noreply.github.com","subject":"fix ACT_FN (#25627)","url":null}},"13753":{"buildId":13753,"infos":{"hash":"2df24228d68872d79304b932a68cf56de3061f5b","abbrevHash":"2df24228","authorName":"Yih-Dar","authorEmail":"2521628+ydshieh@users.noreply.github.com","subject":"Skip doctest for some recent files (#25631)","url":null}},"13754":{"buildId":13754,"infos":{"hash":"8608bf2049a10f8d23043e1bb196707a1c1b3fe5","abbrevHash":"8608bf20","authorName":"Rafael Padilla","authorEmail":"31217453+rafaelpadilla@users.noreply.github.com","subject":"🚨🚨🚨 changing default threshold and applying threshold before the rescale (#25608)","url":null}},"13755":{"buildId":13755,"infos":{"hash":"6f041fcbb853adc6c37da85515384ed9a9c5b181","abbrevHash":"6f041fcb","authorName":"mchau","authorEmail":"minhtriet09@gmail.com","subject":"fix documentation for CustomTrainer (#25635)","url":null}},"13756":{"buildId":13756,"infos":{"hash":"450a181d8b963b4e896be4aac701815aa554a6bb","abbrevHash":"450a181d","authorName":"Susnato Dhar","authorEmail":"susnatodhar10@gmail.com","subject":"Add Pop2Piano (#21785)","url":null}},"13757":{"buildId":13757,"infos":{"hash":"58c36bea74ef8f5a4464d04ab2191d0b1bec6de7","abbrevHash":"58c36bea","authorName":"Joe 
Mifsud","authorEmail":"jmif96@gmail.com","subject":"Support specifying revision in push_to_hub (#25578)","url":null}},"13758":{"buildId":13758,"infos":{"hash":"182b83749a7058547e1e882c603cbf97e20259f8","abbrevHash":"182b8374","authorName":"Tanay Mehta","authorEmail":"heyytanay@gmail.com","subject":"Add Number Normalisation for SpeechT5 (#25447)","url":null}},"13759":{"buildId":13759,"infos":{"hash":"6a314ea7cd01a78a58403bc83e7c637ef83e6b26","abbrevHash":"6a314ea7","authorName":"Blake Wyatt","authorEmail":"894305+xNul@users.noreply.github.com","subject":"[DOCS] MusicGen Docs Update (#25510)","url":null}},"13760":{"buildId":13760,"infos":{"hash":"88e51ba30673b42fa93b2e15760dd645d50753f0","abbrevHash":"88e51ba3","authorName":"Christopher Akiki","authorEmail":"christopher.akiki@protonmail.com","subject":"[MINOR:TYPO] (#25646)","url":null}},"13761":{"buildId":13761,"infos":{"hash":"edb28722c2e100a5d43e307bd4c59169c0cf86b8","abbrevHash":"edb28722","authorName":"Sylvain Gugger","authorEmail":"35901082+sgugger@users.noreply.github.com","subject":"Pass the proper token to PEFT integration in auto classes (#25649)","url":null}},"13762":{"buildId":13762,"infos":{"hash":"36291906896904b47692c707471de9a4a963335d","abbrevHash":"36291906","authorName":"Sylvain Gugger","authorEmail":"35901082+sgugger@users.noreply.github.com","subject":"Put IDEFICS in the right section of the doc (#25650)","url":null}},"13763":{"buildId":13763,"infos":{"hash":"62396cff46854dc53023236cfeb785993fa70067","abbrevHash":"62396cff","authorName":"Matt","authorEmail":"Rocketknight1@users.noreply.github.com","subject":"TF 2.14 compatibility (#25630)","url":null}},"13764":{"buildId":13764,"infos":{"hash":"e20fab0bbe8ca5b23738b670d1bd95aafbbbc53c","abbrevHash":"e20fab0b","authorName":"Arthur","authorEmail":"48595927+ArthurZucker@users.noreply.github.com","subject":"Fix bloom add prefix space (#25652)","url":null}},"13765":{"buildId":13765,"infos":{"hash":"fd56f7f0813d412c3e0848cbd6f94a23de2c07b7","abbrevHash":"fd56f7f0","authorName":"Rafael Padilla","authorEmail":"31217453+rafaelpadilla@users.noreply.github.com","subject":"removing unnecesssary extra parameter (#25643)","url":null}},"13766":{"buildId":13766,"infos":{"hash":"5eeaef921f70acd68073d1066ccb09d7c6e6f475","abbrevHash":"5eeaef92","authorName":"Alex McKinney","authorEmail":"44398246+vvvm23@users.noreply.github.com","subject":"Adds `TRANSFORMERS_TEST_BACKEND` (#25655)","url":null}},"13767":{"buildId":13767,"infos":{"hash":"908f853688c4d523780797f27f83af3c10418e92","abbrevHash":"908f8536","authorName":"AleksanderWWW","authorEmail":"alwojnarowicz@gmail.com","subject":"stringify config (#25637)","url":null}},"13768":{"buildId":13768,"infos":{"hash":"977b2f05d5697f33e51111e4834a127a9a76349f","abbrevHash":"977b2f05","authorName":"Gabriel Asher","authorEmail":"85761680+gaasher@users.noreply.github.com","subject":"Add input_embeds functionality to gpt_neo Causal LM (#25659)","url":null}},"13769":{"buildId":13769,"infos":{"hash":"40a0cabd93f86a7c09406159ad03a3804c2940da","abbrevHash":"40a0cabd","authorName":"Yih-Dar","authorEmail":"2521628+ydshieh@users.noreply.github.com","subject":"Update doc toctree (#25661)","url":null}},"13770":{"buildId":13770,"infos":{"hash":"57943630e24651e6d954b912e7fcdb2b4c719cc4","abbrevHash":"57943630","authorName":"Wonhyeong Seo","authorEmail":"wonhseo@kakao.com","subject":"Add Llama2 resources 
(#25531)","url":null}},"13771":{"buildId":13771,"infos":{"hash":"51794bf21ee6c9b9a702a3bceeea167e9518880b","abbrevHash":"51794bf2","authorName":"Arthur","authorEmail":"48595927+ArthurZucker@users.noreply.github.com","subject":"[`SPM`] Patch `spm` Llama and T5 (#25656)","url":null}},"13772":{"buildId":13772,"infos":{"hash":"db587220844538787f560c8a797f1268fef9099d","abbrevHash":"db587220","authorName":"Arthur","authorEmail":"48595927+ArthurZucker@users.noreply.github.com","subject":"[`GPTNeo`] Add input_embeds functionality to gpt_neo Causal LM (#25664)","url":null}},"13773":{"buildId":13773,"infos":{"hash":"3d1edb6c5d36bf6426e72223f534266ff29c45c4","abbrevHash":"3d1edb6c","authorName":"Yih-Dar","authorEmail":"2521628+ydshieh@users.noreply.github.com","subject":"fix wrong path in some doc (#25658)","url":null}},"13774":{"buildId":13774,"infos":{"hash":"b413e0610b42d4c8d9c7a69c06440ad27c69808b","abbrevHash":"b413e061","authorName":"Yih-Dar","authorEmail":"2521628+ydshieh@users.noreply.github.com","subject":"Remove `utils/documentation_tests.txt` (#25680)","url":null}},"13775":{"buildId":13775,"infos":{"hash":"2cf87e2bbb46ff7ec9dd2746d694d41b6815fb43","abbrevHash":"2cf87e2b","authorName":"Nora Belrose","authorEmail":"39116809+norabelrose@users.noreply.github.com","subject":"Prevent Dynamo graph fragmentation in GPTNeoX with torch.baddbmm fix (#24941)","url":null}},"13776":{"buildId":13776,"infos":{"hash":"77cb2ab7921c5b2336916eb7874c807bf86ad33c","abbrevHash":"77cb2ab7","authorName":"Sanchit Gandhi","authorEmail":"93869735+sanchit-gandhi@users.noreply.github.com","subject":"⚠️ [CLAP] Fix dtype of logit scales in init (#25682)","url":null}},"13777":{"buildId":13777,"infos":{"hash":"8657ec68fc01c289245f3c71725353eef055fc3c","abbrevHash":"8657ec68","authorName":"Lysandre Debut","authorEmail":"lysandre.debut@reseau.eseo.fr","subject":"Sets the stalebot to 10 AM CEST (#25678)","url":null}},"13778":{"buildId":13778,"infos":{"hash":"2189a7f54a5ec10a7559a93fa7e6eaca527d2941","abbrevHash":"2189a7f5","authorName":"Yih-Dar","authorEmail":"2521628+ydshieh@users.noreply.github.com","subject":"Fix `pad_token` check condition (#25685)","url":null}},"13779":{"buildId":13779,"infos":{"hash":"6add3b313defc35b5d8ae3d946131aeb625e0441","abbrevHash":"6add3b31","authorName":"sanjeevk-os","authorEmail":"73068589+sanjeevk-os@users.noreply.github.com","subject":"[DOCS] Added docstring example for EpsilonLogitsWarper #24783 (#25378)","url":null}},"13780":{"buildId":13780,"infos":{"hash":"656e17f6f7eded9df87ad59cbd064fdf5f44f708","abbrevHash":"656e17f6","authorName":"Phuc Van Phan","authorEmail":"phanphuc1100@gmail.com","subject":"correct resume training steps number in progress bar (#25691)","url":null}},"13781":{"buildId":13781,"infos":{"hash":"3c2383b1c6eb860c0511d081e670d1782cd66b8d","abbrevHash":"3c2383b1","authorName":"Joao Gante","authorEmail":"joaofranciscocardosogante@gmail.com","subject":"Generate: general test for decoder-only generation from `inputs_embeds` (#25687)","url":null}},"13782":{"buildId":13782,"infos":{"hash":"4d40109c3a93c9b8bbca204cb046ed510f1c72e8","abbrevHash":"4d40109c","authorName":"Susnato Dhar","authorEmail":"susnatodhar10@gmail.com","subject":"Fix typo in `configuration_gpt2.py` (#25676)","url":null}},"13783":{"buildId":13783,"infos":{"hash":"68fa9a5937ae7aa707f5ff2639aa36a37a0a9928","abbrevHash":"68fa9a59","authorName":"Sylvain Gugger","authorEmail":"Sylvain.gugger@gmail.com","subject":"Skip broken 
tests","url":null}},"13784":{"buildId":13784,"infos":{"hash":"b85b88069a778f0ffbb7a0f6389e18fca9432dcf","abbrevHash":"b85b8806","authorName":"Sourab Mangrulkar","authorEmail":"13534540+pacman100@users.noreply.github.com","subject":"fix ram efficient fsdp init (#25686)","url":null}},"13785":{"buildId":13785,"infos":{"hash":"6e6da5e4b860d98d3b625fe5c63db4e83087b6ff","abbrevHash":"6e6da5e4","authorName":"Arthur","authorEmail":"48595927+ArthurZucker@users.noreply.github.com","subject":"[`LlamaTokenizer`] make unk_token_length a property (#25689)","url":null}},"13786":{"buildId":13786,"infos":{"hash":"c2123626aa3cd6c1ae4869ec9bc8869d1a408166","abbrevHash":"c2123626","authorName":"Sylvain Gugger","authorEmail":"35901082+sgugger@users.noreply.github.com","subject":"Update list of persons to tag (#25708)","url":null}},"13787":{"buildId":13787,"infos":{"hash":"f01459c75db47308698b19b8b1bac1ae1159cd31","abbrevHash":"f01459c7","authorName":"Tom Aarsen","authorEmail":"37621491+tomaarsen@users.noreply.github.com","subject":"docs: Resolve typos in warning text (#25711)","url":null}},"13788":{"buildId":13788,"infos":{"hash":"8fff61b9db86ac3ad92deea48d504b5dafc3b78e","abbrevHash":"8fff61b9","authorName":"Yih-Dar","authorEmail":"2521628+ydshieh@users.noreply.github.com","subject":"Fix failing `test_batch_generation` for bloom (#25718)","url":null}},"13789":{"buildId":13789,"infos":{"hash":"70b49f023c9f6579c516671604468a491227b4da","abbrevHash":"70b49f02","authorName":"Younes Belkada","authorEmail":"49240599+younesbelkada@users.noreply.github.com","subject":"[`PEFT`] Fix peft version (#25710)","url":null}},"13790":{"buildId":13790,"infos":{"hash":"2febd506149d039b51590f5dc7b45f0d8624819d","abbrevHash":"2febd506","authorName":"Sylvain Gugger","authorEmail":"35901082+sgugger@users.noreply.github.com","subject":"Fix number of minimal calls to the Hub with peft integration (#25715)","url":null}},"13791":{"buildId":13791,"infos":{"hash":"584eeb5387193d352da976cc3d1305f5c3404850","abbrevHash":"584eeb53","authorName":"Younes Belkada","authorEmail":"49240599+younesbelkada@users.noreply.github.com","subject":"[`AutoGPTQ`] Add correct installation of GPTQ library + fix slow tests (#25713)","url":null}},"13792":{"buildId":13792,"infos":{"hash":"0a365c3e6a0e174302debff4023182838607acf1","abbrevHash":"0a365c3e","authorName":"Joao Gante","authorEmail":"joaofranciscocardosogante@gmail.com","subject":"Generate: nudge towards `do_sample=False` when `temperature=0.0` (#25722)","url":null}},"13793":{"buildId":13793,"infos":{"hash":"fecf08560cd9843b569279dd6f665c987890af4c","abbrevHash":"fecf0856","authorName":"Arthur","authorEmail":"48595927+ArthurZucker@users.noreply.github.com","subject":"[`from_pretrained`] Simpler code for peft (#25726)","url":null}},"13794":{"buildId":13794,"infos":{"hash":"7a6efe1e9f756f585f2ffe5ada22cf6b15edd23b","abbrevHash":"7a6efe1e","authorName":"Stas Bekman","authorEmail":"stas00@users.noreply.github.com","subject":"[idefics] idefics-9b test use 4bit quant (#25734)","url":null}},"13795":{"buildId":13795,"infos":{"hash":"1b2381c46b834a89e447f7a01f0961c4e940d117","abbrevHash":"1b2381c4","authorName":"amyeroberts","authorEmail":"22614925+amyeroberts@users.noreply.github.com","subject":"ImageProcessor - check if input pixel values between 0-255 (#25688)","url":null}},"13796":{"buildId":13796,"infos":{"hash":"fd0b94fd7b0c00c68e2e9f054793287808e33608","abbrevHash":"fd0b94fd","authorName":"Younes Belkada","authorEmail":"49240599+younesbelkada@users.noreply.github.com","subject":"[`from_pretrained`] Fix 
failing PEFT tests (#25733)","url":null}},"13797":{"buildId":13797,"infos":{"hash":"021887682224daf29264f98c759a45e88c82e244","abbrevHash":"02188768","authorName":"Sanchit Gandhi","authorEmail":"93869735+sanchit-gandhi@users.noreply.github.com","subject":"[ASR Pipe Test] Fix CTC timestamps error message (#25727)","url":null}},"13798":{"buildId":13798,"infos":{"hash":"f26099e7b5cf579f99a42bab6ddd371bf2c8d548","abbrevHash":"f26099e7","authorName":"Wonhyeong Seo","authorEmail":"wonhseo@kakao.com","subject":"🌐 [i18n-KO] Translated `visual_question_answering.md` to Korean (#25679)","url":null}},"13799":{"buildId":13799,"infos":{"hash":"ae320fa53f74cc4dfa0e4fc3c95b6129a86b0512","abbrevHash":"ae320fa5","authorName":"Younes Belkada","authorEmail":"49240599+younesbelkada@users.noreply.github.com","subject":"[`PEFT`] Fix PeftConfig save pretrained when calling `add_adapter` (#25738)","url":null}},"13800":{"buildId":13800,"infos":{"hash":"8968fface4e804f380391d880f569578b84b4121","abbrevHash":"8968ffac","authorName":"Anthony Susevski","authorEmail":"77211520+asusevski@users.noreply.github.com","subject":"fixed typo in speech encoder decoder doc (#25745)","url":null}},"13801":{"buildId":13801,"infos":{"hash":"cb8e3ee25fc2349e9262faa1e0c35d80978349fe","abbrevHash":"cb8e3ee2","authorName":"Pedro Cuenca","authorEmail":"pedro@huggingface.co","subject":"Add FlaxCLIPTextModelWithProjection (#25254)","url":null}},"13802":{"buildId":13802,"infos":{"hash":"85cf90a1c92f574ce2eb3fafe0681a3af0a9d41b","abbrevHash":"85cf90a1","authorName":"Joao Gante","authorEmail":"joaofranciscocardosogante@gmail.com","subject":"Generate: add missing logits processors docs (#25653)","url":null}},"13803":{"buildId":13803,"infos":{"hash":"c6a84b72025fa7795f7fb5c97e3de7861a4dfb01","abbrevHash":"c6a84b72","authorName":"Jess","authorEmail":"jessbpeck@gmail.com","subject":"[DOCS] Add example for HammingDiversityLogitsProcessor (#25481)","url":null}},"13804":{"buildId":13804,"infos":{"hash":"494e96d8d61277cd7509e5f90aa14e6ac604063a","abbrevHash":"494e96d8","authorName":"Joao Gante","authorEmail":"joaofranciscocardosogante@gmail.com","subject":"Generate: logits processors are doctested and fix broken doctests (#25692)","url":null}},"13805":{"buildId":13805,"infos":{"hash":"0770ce6cfbcd8334084f9f2c4302e8c71ac931ee","abbrevHash":"0770ce6c","authorName":"Sanchit Gandhi","authorEmail":"93869735+sanchit-gandhi@users.noreply.github.com","subject":"[CLAP] Fix logit scales dtype for fp16 (#25754)","url":null}},"13806":{"buildId":13806,"infos":{"hash":"dd8b7d28aec80013ad2b25ead4200eea1a6a767e","abbrevHash":"dd8b7d28","authorName":"Arthur","authorEmail":"48595927+ArthurZucker@users.noreply.github.com","subject":"[`Sentencepiece`] make sure `legacy` do not require `protobuf` (#25684)","url":null}},"13807":{"buildId":13807,"infos":{"hash":"35c570c80edb9f56aa8339c03d3975847a85cb9d","abbrevHash":"35c570c8","authorName":"Marc Sun","authorEmail":"57196510+SunMarc@users.noreply.github.com","subject":"fix encoder hook (#25735)","url":null}},"13808":{"buildId":13808,"infos":{"hash":"8b0a7bfcdcd51cdf8e7bd9a08ff061fda6bd2b8c","abbrevHash":"8b0a7bfc","authorName":"Joao Gante","authorEmail":"joaofranciscocardosogante@gmail.com","subject":"Docs: fix indentation in `HammingDiversityLogitsProcessor` (#25756)","url":null}},"13809":{"buildId":13809,"infos":{"hash":"4d9e45f3ef624cab41f605d7439862ce23ca806a","abbrevHash":"4d9e45f3","authorName":"David Reguera","authorEmail":"33068707+nablabits@users.noreply.github.com","subject":"Add type hints for several pytorch 
models (batch-3) (#25705)","url":null}},"13810":{"buildId":13810,"infos":{"hash":"4b796978656e461177a83d58ec3c2b06152c63db","abbrevHash":"4b796978","authorName":"Younes Belkada","authorEmail":"49240599+younesbelkada@users.noreply.github.com","subject":"🚨🚨🚨 [`Refactor`] Move third-party related utility files into `integrations/` folder 🚨🚨🚨 (#25599)","url":null}},"13811":{"buildId":13811,"infos":{"hash":"0040469bb8e718f4ffafef829e497805df1aa1fb","abbrevHash":"0040469b","authorName":"Tianlin Liu","authorEmail":"tliu@jacobs-alumni.de","subject":"Correct attention mask dtype for Flax GPT2 (#25636)","url":null}},"13812":{"buildId":13812,"infos":{"hash":"74081cb5fa52540bbdde620942bd3a657af85c8e","abbrevHash":"74081cb5","authorName":"Alan Ji","authorEmail":"hzji210@gmail.com","subject":"fix a typo in docsting (#25759)","url":null}},"13813":{"buildId":13813,"infos":{"hash":"015f8e110d270a0ad42de4ae5b98198d69eb1964","abbrevHash":"015f8e11","authorName":"Arthur","authorEmail":"48595927+ArthurZucker@users.noreply.github.com","subject":"[`CodeLlama`] Add support for `CodeLlama` (#25740)","url":null}},"13814":{"buildId":13814,"infos":{"hash":"960807f62e53676723ab8281019219864ef3db4d","abbrevHash":"960807f6","authorName":"Stas Bekman","authorEmail":"stas00@users.noreply.github.com","subject":"[idefics] small fixes (#25764)","url":null}},"13815":{"buildId":13815,"infos":{"hash":"686c68f64c9d0181bd54d4d2e2446543c3eca1fa","abbrevHash":"686c68f6","authorName":"Tigran Khachatryan","authorEmail":"65066173+Geometrein@users.noreply.github.com","subject":"Add docstrings and fix VIVIT examples (#25628)","url":null}},"13816":{"buildId":13816,"infos":{"hash":"de139702a17003c7dd02e671a9a7417d346c3df2","abbrevHash":"de139702","authorName":"Arthur","authorEmail":"48595927+ArthurZucker@users.noreply.github.com","subject":"[`LlamaFamiliy`] add a tip about dtype (#25794)","url":null}},"13817":{"buildId":13817,"infos":{"hash":"cb91ec67b54c1a8a9a24825165161c90fe7c0e51","abbrevHash":"cb91ec67","authorName":"David Reguera","authorEmail":"33068707+nablabits@users.noreply.github.com","subject":"Add type hints for several pytorch models (batch-2) (#25557)","url":null}},"13818":{"buildId":13818,"infos":{"hash":"ed915cff9751e3e41ebb4733b87c45c938daf116","abbrevHash":"ed915cff","authorName":"David Reguera","authorEmail":"33068707+nablabits@users.noreply.github.com","subject":"Add type hints for pytorch models (final batch) (#25750)","url":null}},"13819":{"buildId":13819,"infos":{"hash":"886b6be081e1bc28e8c6cbc93eba934f83677ab2","abbrevHash":"886b6be0","authorName":"David Reguera","authorEmail":"33068707+nablabits@users.noreply.github.com","subject":"Add type hints for several pytorch models (batch-4) (#25749)","url":null}},"13820":{"buildId":13820,"infos":{"hash":"50573c648ae953dcc1b94d663651f07fb02268f4","abbrevHash":"50573c64","authorName":"Stas Bekman","authorEmail":"stas00@users.noreply.github.com","subject":"[idefics] fix vision's `hidden_act` (#25787)","url":null}},"13821":{"buildId":13821,"infos":{"hash":"738ecd17d869577d263eb1fba3fee0ab8ec5b5a2","abbrevHash":"738ecd17","authorName":"Arup De","authorEmail":"arde@linkedin.com","subject":"Arde/fsdp activation checkpointing (#25771)","url":null}},"13822":{"buildId":13822,"infos":{"hash":"39c37fe45c12bc2f936313330fe5c82319adb6e3","abbrevHash":"39c37fe4","authorName":"Aman Gupta Karmani","authorEmail":"aman@tmm1.net","subject":"Fix incorrect Boolean value in deepspeed example 
(#25788)","url":null}},"13823":{"buildId":13823,"infos":{"hash":"99c3d44906ec448c4559fecdc9a63eda364db4d4","abbrevHash":"99c3d449","authorName":"Lorenzo Battistela","authorEmail":"70359945+Lorenzobattistela@users.noreply.github.com","subject":"fixing name position_embeddings to object_queries (#24652)","url":null}},"13824":{"buildId":13824,"infos":{"hash":"4c21da5e347bfc53ee4ec5b71a23721fefe6822c","abbrevHash":"4c21da5e","authorName":"NielsRogge","authorEmail":"48327001+NielsRogge@users.noreply.github.com","subject":"Add ViTDet (#25524)","url":null}},"13825":{"buildId":13825,"infos":{"hash":"77713d11f6656314fb06c217cf43c4b8f5c64df8","abbrevHash":"77713d11","authorName":"NielsRogge","authorEmail":"48327001+NielsRogge@users.noreply.github.com","subject":"[DINOv2] Add backbone class (#25520)","url":null}},"13826":{"buildId":13826,"infos":{"hash":"c9bae84eb58745784e5cc6491f3f4958ba4706c3","abbrevHash":"c9bae84e","authorName":"Sourab Mangrulkar","authorEmail":"13534540+pacman100@users.noreply.github.com","subject":"Resolving Attribute error when using the FSDP ram efficient feature (#25820)","url":null}},"13827":{"buildId":13827,"infos":{"hash":"dc0c102954ff1f6bcb47de85afea5edc81fc8c7f","abbrevHash":"dc0c1029","authorName":"Younes Belkada","authorEmail":"49240599+younesbelkada@users.noreply.github.com","subject":"[`Docs`] More clarifications on BT + FA (#25823)","url":null}},"13828":{"buildId":13828,"infos":{"hash":"3dd030d264915c71a0bdd23838dbb27156f44ed1","abbrevHash":"3dd030d2","authorName":"zspo","authorEmail":"songpo.zhang@foxmail.com","subject":"fix register (#25779)","url":null}},"13829":{"buildId":13829,"infos":{"hash":"9525515cd40ab2632cf40e1a9d21f7751b02eceb","abbrevHash":"9525515c","authorName":"Omar Sanseviero","authorEmail":"osanseviero@gmail.com","subject":"Minor wording changes for Code Llama (#25815)","url":null}},"13830":{"buildId":13830,"infos":{"hash":"5b5ee235f3239413e9614bd02032b1a203dab710","abbrevHash":"5b5ee235","authorName":"Arthur","authorEmail":"48595927+ArthurZucker@users.noreply.github.com","subject":"[`LlamaTokenizer`] `tokenize` nits. 
(#25793)","url":null}},"13831":{"buildId":13831,"infos":{"hash":"2ee60b757e30815529239c87235a2b794fa60286","abbrevHash":"2ee60b75","authorName":"Dongkeun Yoon","authorEmail":"57797966+MattYoon@users.noreply.github.com","subject":"fix warning trigger for embed_positions when loading xglm (#25798)","url":null}},"13832":{"buildId":13832,"infos":{"hash":"173fa7da9c29c4e3a683ac5d489cde4e7220c98a","abbrevHash":"173fa7da","authorName":"SeongWooChoi","authorEmail":"46990061+nuatmochoi@users.noreply.github.com","subject":"🌐 [i18n-KO] Translated peft.md to Korean (#25706)","url":null}},"13833":{"buildId":13833,"infos":{"hash":"33aa0af70c70d9a8205b0ff0d1d4e68807fbb173","abbrevHash":"33aa0af7","authorName":"MinJae Kang","authorEmail":"39152134+mjk0618@users.noreply.github.com","subject":"🌐 [i18n-KO] `model_memory_anatomy.md` to Korean (#25755)","url":null}},"13834":{"buildId":13834,"infos":{"hash":"483861d52db59cf99219a0281695d1e7e8859218","abbrevHash":"483861d5","authorName":"Chau Nguyen","authorEmail":"60038822+chaumng@users.noreply.github.com","subject":"Error with checking args.eval_accumulation_steps to gather tensors (#25819)","url":null}},"13835":{"buildId":13835,"infos":{"hash":"a35f889acc91cb40bd8c6659691aeb27581a69b1","abbrevHash":"a35f889a","authorName":"Joao Gante","authorEmail":"joaofranciscocardosogante@gmail.com","subject":"Tests: detect lines removed from \"utils/not_doctested.txt\" and doctest ALL generation files (#25763)","url":null}},"13836":{"buildId":13836,"infos":{"hash":"d97fd871e5ba57b23b1775ef2939ffea128dd08d","abbrevHash":"d97fd871","authorName":"heuristicwave","authorEmail":"31366038+heuristicwave@users.noreply.github.com","subject":"🌐 [i18n-KO] Translated `add_new_pipeline.md` to Korean (#25498)","url":null}},"13837":{"buildId":13837,"infos":{"hash":"aade754b27f4496cd67c2d1bfb67ef0df0ffa5d1","abbrevHash":"aade754b","authorName":"Sohyun Sim","authorEmail":"96299403+sim-so@users.noreply.github.com","subject":"🌐 [i18n-KO] TranslatedΒ `community.md` to Korean (#25674)","url":null}},"13838":{"buildId":13838,"infos":{"hash":"245dcc49ef9862a7165aec7be9c4a3299b8d06a1","abbrevHash":"245dcc49","authorName":"Arthur","authorEmail":"48595927+ArthurZucker@users.noreply.github.com","subject":"🀦update warning to If you want to use the new behaviour, set `legacy=… (#25833)","url":null}},"13839":{"buildId":13839,"infos":{"hash":"0e59c93983b84610db9a4d88be1531ba8d745ff9","abbrevHash":"0e59c939","authorName":"Susnato Dhar","authorEmail":"susnatodhar10@gmail.com","subject":"update remaining `Pop2Piano` checkpoints (#25827)","url":null}},"13840":{"buildId":13840,"infos":{"hash":"0daeeb40a10178ce219fffbf41791330524eedc1","abbrevHash":"0daeeb40","authorName":"Sanchit Gandhi","authorEmail":"93869735+sanchit-gandhi@users.noreply.github.com","subject":"[AutoTokenizer] Add data2vec to mapping (#25835)","url":null}},"13841":{"buildId":13841,"infos":{"hash":"ce2d4bc6a1be4f3eb2dc3d1bd564a0892665b2c7","abbrevHash":"ce2d4bc6","authorName":"amyeroberts","authorEmail":"22614925+amyeroberts@users.noreply.github.com","subject":"MaskFormer,Mask2former - reduce memory load (#25741)","url":null}},"13842":{"buildId":13842,"infos":{"hash":"dbc16f4404eca4a75459683d5135f6accea35a02","abbrevHash":"dbc16f44","authorName":"Haylee SchΓ€fer","authorEmail":"mail@inventivetalent.org","subject":"Support loading base64 images in pipelines (#25633)","url":null}},"13843":{"buildId":13843,"infos":{"hash":"8c75cfdaeeb9ae960cfdb0ba780d35add282b2df","abbrevHash":"8c75cfda","authorName":"Nino 
Risteski","authorEmail":"95188570+NinoRisteski@users.noreply.github.com","subject":"Update README.md (#25834)","url":null}},"13844":{"buildId":13844,"infos":{"hash":"07998ef39926b76d3f6667025535d0859eed61c3","abbrevHash":"07998ef3","authorName":"Joao Gante","authorEmail":"joaofranciscocardosogante@gmail.com","subject":"Generate: models with custom `generate()` return `True` in `can_generate()` (#25838)","url":null}},"13845":{"buildId":13845,"infos":{"hash":"1bf2f36daf6731f001ea88ae53ba96acfb6c8497","abbrevHash":"1bf2f36d","authorName":"Nino Risteski","authorEmail":"95188570+NinoRisteski@users.noreply.github.com","subject":"Update README.md (#25832)","url":null}},"13846":{"buildId":13846,"infos":{"hash":"52574026b6740a3882d6dd1cbf1e1663d4cea27b","abbrevHash":"52574026","authorName":"Aman Gupta Karmani","authorEmail":"aman@tmm1.net","subject":"minor typo fix in PeftAdapterMixin docs (#25829)","url":null}},"13847":{"buildId":13847,"infos":{"hash":"62399d6f3568d1436e3e0364a32d13e32bb78cb6","abbrevHash":"62399d6f","authorName":"Yih-Dar","authorEmail":"2521628+ydshieh@users.noreply.github.com","subject":"Add flax installation in daily doctest workflow (#25860)","url":null}},"13848":{"buildId":13848,"infos":{"hash":"09dc99517f5f38ee210cf1145a7b17fc99b37dac","abbrevHash":"09dc9951","authorName":"Juan Pizarro","authorEmail":"jpizarrom@gmail.com","subject":"Add Blip2 model in VQA pipeline (#25532)","url":null}},"13849":{"buildId":13849,"infos":{"hash":"ed290b083751590ba79e3a699608c8e9b70d5d9e","abbrevHash":"ed290b08","authorName":"Lysandre Debut","authorEmail":"lysandre.debut@reseau.eseo.fr","subject":"Remote tools are turned off (#25867)","url":null}},"13850":{"buildId":13850,"infos":{"hash":"f73c20970c5cf575dd341d18216c42bec0b8a0e5","abbrevHash":"f73c2097","authorName":"Yih-Dar","authorEmail":"2521628+ydshieh@users.noreply.github.com","subject":"Fix imports (#25869)","url":null}},"13851":{"buildId":13851,"infos":{"hash":"72298178bcbb5f3cb34af5283ac36dad8b869fb5","abbrevHash":"72298178","authorName":"Marc Sun","authorEmail":"57196510+SunMarc@users.noreply.github.com","subject":"fix max_memory for bnb (#25842)","url":null}},"13852":{"buildId":13852,"infos":{"hash":"459bc6738c162511fabf5b9102171db1fc8bb53e","abbrevHash":"459bc673","authorName":"Joao Gante","authorEmail":"joaofranciscocardosogante@gmail.com","subject":"Docs: fix example failing doctest in `generation_strategies.md ` (#25874)","url":null}},"13853":{"buildId":13853,"infos":{"hash":"9219d1427bf3e868c76fd495bb469cf5e1542242","abbrevHash":"9219d142","authorName":"Yih-Dar","authorEmail":"2521628+ydshieh@users.noreply.github.com","subject":"pin pandas==2.0.3 (#25875)","url":null}},"13854":{"buildId":13854,"infos":{"hash":"1c6f072db0c17c7d82bb0d3b7529d57ebc9a0f2f","abbrevHash":"1c6f072d","authorName":"Yih-Dar","authorEmail":"2521628+ydshieh@users.noreply.github.com","subject":"Reduce CI output (#25876)","url":null}},"13855":{"buildId":13855,"infos":{"hash":"716bb2e3910fd4872064c55b0d8bc3dad754d129","abbrevHash":"716bb2e3","authorName":"NielsRogge","authorEmail":"48327001+NielsRogge@users.noreply.github.com","subject":"[ViTDet] Fix doc tests (#25880)","url":null}},"13856":{"buildId":13856,"infos":{"hash":"f8468b4facb2e46a1766a256b9fe47b0865d6854","abbrevHash":"f8468b4f","authorName":"qihqi","authorEmail":"qihan.dev@gmail.com","subject":"For xla tensors, use an alternative way to get a unique id (#25802)","url":null}},"13857":{"buildId":13857,"infos":{"hash":"e95bcaeef0bd6b084b7615faae411a14d50bcfee","abbrevHash":"e95bcaee","authorName":"Sourab 
Mangrulkar","authorEmail":"13534540+pacman100@users.noreply.github.com","subject":"fix ds z3 checkpointing when `stage3_gather_16bit_weights_on_model_save=False` (#25817)","url":null}},"13858":{"buildId":13858,"infos":{"hash":"99fc3ac8ac2d79f19e983b63c2992b78f4509111","abbrevHash":"99fc3ac8","authorName":"Vibhor Kumar","authorEmail":"vibhor.kumar.me@gmail.com","subject":"Modify efficient GPU training doc with now-available adamw_bnb_8bit optimizer (#25807)","url":null}},"13859":{"buildId":13859,"infos":{"hash":"3b39b906183ed08d9961908eb73104aeea345d11","abbrevHash":"3b39b906","authorName":"Arthur","authorEmail":"48595927+ArthurZucker@users.noreply.github.com","subject":"[`TokenizerFast`] `can_save_slow_tokenizer` as a property for when `vocab_file`'s folder was removed (#25626)","url":null}},"13860":{"buildId":13860,"infos":{"hash":"a39ebbf87978fe3b129efbf9d4a6dfeefcacf08c","abbrevHash":"a39ebbf8","authorName":"Arthur","authorEmail":"48595927+ArthurZucker@users.noreply.github.com","subject":"[`CodeLlama`] Fix CI (#25890)","url":null}},"13861":{"buildId":13861,"infos":{"hash":"2be8a9098e06262bdd5c16b5e8a70f145df88e96","abbrevHash":"2be8a909","authorName":"raghavanone","authorEmail":"115454562+raghavanone@users.noreply.github.com","subject":"Save image_processor while saving pipeline (ImageSegmentationPipeline) (#25884)","url":null}},"13862":{"buildId":13862,"infos":{"hash":"9c5acca0028b550e1328ba7e2f16418fe0a0c634","abbrevHash":"9c5acca0","authorName":"Younes Belkada","authorEmail":"49240599+younesbelkada@users.noreply.github.com","subject":"[`InstructBlip`] FINAL Fix instructblip test (#25887)","url":null}},"13863":{"buildId":13863,"infos":{"hash":"eaf5e98ec03d73c24367438100b05c02ce5ad10c","abbrevHash":"eaf5e98e","authorName":"David Reguera","authorEmail":"33068707+nablabits@users.noreply.github.com","subject":"Add type hints for tf models batch 1 (#25853)","url":null}},"13864":{"buildId":13864,"infos":{"hash":"3fb1535b09901db72a41095c007c29bcdf02e3ae","abbrevHash":"3fb1535b","authorName":"Yih-Dar","authorEmail":"2521628+ydshieh@users.noreply.github.com","subject":"Update `setup.py` (#25893)","url":null}},"13865":{"buildId":13865,"infos":{"hash":"0f08cd205a440d23e6bf924cddd73ff48e09fe35","abbrevHash":"0f08cd20","authorName":"Sylvain Gugger","authorEmail":"35901082+sgugger@users.noreply.github.com","subject":"Smarter check for `is_tensor` (#25871)","url":null}},"13866":{"buildId":13866,"infos":{"hash":"ef10dbce5cbc9a8b6a0a90b04378ca96f4023aa1","abbrevHash":"ef10dbce","authorName":"Marc Sun","authorEmail":"57196510+SunMarc@users.noreply.github.com","subject":"remove torch_dtype override (#25894)","url":null}},"13867":{"buildId":13867,"infos":{"hash":"4ece3b9433ea0bedff0d64fe00623c35766d7d44","abbrevHash":"4ece3b94","authorName":"Matthijs Hollemans","authorEmail":"mail@hollance.com","subject":"add VITS model (#24085)","url":null}},"13868":{"buildId":13868,"infos":{"hash":"024acd271b60568bba214901a9e71d67c44353dc","abbrevHash":"024acd27","authorName":"pkumc","authorEmail":"machijaychou@163.com","subject":"fix FSDP model resume optimizer & scheduler (#25852)","url":null}},"13869":{"buildId":13869,"infos":{"hash":"53e2fd785b2792e20f13189d30d1d4ef7d9cf673","abbrevHash":"53e2fd78","authorName":"Joao Gante","authorEmail":"joaofranciscocardosogante@gmail.com","subject":"Falcon: Add RoPE scaling 
(#25878)","url":null}},"13870":{"buildId":13870,"infos":{"hash":"16d6e3087cd35cb08ee24137900340d6924103dd","abbrevHash":"16d6e308","authorName":"Yih-Dar","authorEmail":"2521628+ydshieh@users.noreply.github.com","subject":"Better error message for pipeline loading (#25912)","url":null}},"13871":{"buildId":13871,"infos":{"hash":"69c5b8f1861bf449339cbcdcd0d0e4a98e9a4c6b","abbrevHash":"69c5b8f1","authorName":"Omar Sanseviero","authorEmail":"osanseviero@gmail.com","subject":"Remove broken docs for MusicGen (#25905)","url":null}},"13872":{"buildId":13872,"infos":{"hash":"be0e189bd3f2b5b960a4062361ead32c055a362e","abbrevHash":"be0e189b","authorName":"Zach Mueller","authorEmail":"muellerzr@gmail.com","subject":"Revert frozen training arguments (#25903)","url":null}},"13873":{"buildId":13873,"infos":{"hash":"b439129e74bb207138e49ffb1f147bd94aa58574","abbrevHash":"b439129e","authorName":"Sanchit Gandhi","authorEmail":"93869735+sanchit-gandhi@users.noreply.github.com","subject":"[VITS] Add to TTA pipeline (#25906)","url":null}},"13874":{"buildId":13874,"infos":{"hash":"1fa2d89a9bb98a15e9720190e07d272a42f03d28","abbrevHash":"1fa2d89a","authorName":"Sanchit Gandhi","authorEmail":"93869735+sanchit-gandhi@users.noreply.github.com","subject":"[MMS] Update docs with HF TTS implementation (#25907)","url":null}},"13875":{"buildId":13875,"infos":{"hash":"3587769c08ffaf42c99f6882d4ad76d3a3669e5e","abbrevHash":"3587769c","authorName":"Sanchit Gandhi","authorEmail":"93869735+sanchit-gandhi@users.noreply.github.com","subject":"[VITS] Only trigger tokenizer warning for uroman (#25915)","url":null}},"13876":{"buildId":13876,"infos":{"hash":"a4dd53d88e4852f023332d284ff07a01afcd5681","abbrevHash":"a4dd53d8","authorName":"Arthur","authorEmail":"48595927+ArthurZucker@users.noreply.github.com","subject":"Update-llama-code (#25826)","url":null}},"13877":{"buildId":13877,"infos":{"hash":"0afa5071bd84e44301750fdc594e33db102cf374","abbrevHash":"0afa5071","authorName":"Nino Risteski","authorEmail":"95188570+NinoRisteski@users.noreply.github.com","subject":"Update model_memory_anatomy.md (#25896)","url":null}},"13878":{"buildId":13878,"infos":{"hash":"ab8cba824e3887d90cb9f4d5866fde9243f2c9fe","abbrevHash":"ab8cba82","authorName":"ydshieh","authorEmail":"ydshieh@users.noreply.github.com","subject":"CI: hotfix (skip VitsModelTest::test_initialization)","url":null}},"13879":{"buildId":13879,"infos":{"hash":"b1d475f6d249d37cb3d1bf417eaf2b1f8cff2b34","abbrevHash":"b1d475f6","authorName":"Yih-Dar","authorEmail":"2521628+ydshieh@users.noreply.github.com","subject":"Skip offload tests for `ViTDet` (#25913)","url":null}},"13880":{"buildId":13880,"infos":{"hash":"0f0e1a2c2bff68541a5b9770d78e0fb6feb7de72","abbrevHash":"0f0e1a2c","authorName":"omahs","authorEmail":"73983677+omahs@users.noreply.github.com","subject":"Fix typos (#25936)","url":null}},"13881":{"buildId":13881,"infos":{"hash":"51e1e8120bc569c3f60f7c73ff6e38a90e6229f7","abbrevHash":"51e1e812","authorName":"Nino Risteski","authorEmail":"95188570+NinoRisteski@users.noreply.github.com","subject":"Update community.md (#25928)","url":null}},"13882":{"buildId":13882,"infos":{"hash":"d4407a3bd13b8ec3978b9ba8e4e45cb11f230437","abbrevHash":"d4407a3b","authorName":"Nino Risteski","authorEmail":"95188570+NinoRisteski@users.noreply.github.com","subject":"Update autoclass_tutorial.md (#25929)","url":null}},"13883":{"buildId":13883,"infos":{"hash":"604a6c51ae0b4ce5e8213ea86ed9c71373223a5d","abbrevHash":"604a6c51","authorName":"Nino 
Risteski","authorEmail":"95188570+NinoRisteski@users.noreply.github.com","subject":"Update README.md (#25941)","url":null}},"13884":{"buildId":13884,"infos":{"hash":"f435003e0c2dd152a2117d11c0ab6fcd4f2d84c0","abbrevHash":"f435003e","authorName":"Sanchit Gandhi","authorEmail":"93869735+sanchit-gandhi@users.noreply.github.com","subject":"[MMS] Fix pip install in docs (#25949)","url":null}},"13885":{"buildId":13885,"infos":{"hash":"eb984418e2f26f749e832730b264d7762e6be8c2","abbrevHash":"eb984418","authorName":"Sanchit Gandhi","authorEmail":"93869735+sanchit-gandhi@users.noreply.github.com","subject":"[VITS] Handle deprecated weight norm (#25946)","url":null}},"13886":{"buildId":13886,"infos":{"hash":"bfb1895e3346cb8a2bf2560c75d45e70edf46a47","abbrevHash":"bfb1895e","authorName":"Omar Sanseviero","authorEmail":"osanseviero@gmail.com","subject":"Import deepspeed utilities from integrations (#25919)","url":null}},"13887":{"buildId":13887,"infos":{"hash":"7cd01d4e384f7ce9c18a81a4decb2c2531542661","abbrevHash":"7cd01d4e","authorName":"Nino Risteski","authorEmail":"95188570+NinoRisteski@users.noreply.github.com","subject":"Update README.md (#25922)","url":null}},"13888":{"buildId":13888,"infos":{"hash":"d750eff62757a46160b6f73b95e8035c49c2971b","abbrevHash":"d750eff6","authorName":"Sanchit Gandhi","authorEmail":"93869735+sanchit-gandhi@users.noreply.github.com","subject":"[VITS] Fix init test (#25945)","url":null}},"13889":{"buildId":13889,"infos":{"hash":"034bc5d26ad7c0e284265d92d3da39d786138545","abbrevHash":"034bc5d2","authorName":"Matt","authorEmail":"Rocketknight1@users.noreply.github.com","subject":"Add proper Falcon docs and conversion script (#25954)","url":null}},"13890":{"buildId":13890,"infos":{"hash":"3a479672ea95b058b621dcdcd1d15b73f36dc25a","abbrevHash":"3a479672","authorName":"Lysandre Debut","authorEmail":"lysandre.debut@reseau.eseo.fr","subject":"Fix failing test (#25963)","url":null}},"13891":{"buildId":13891,"infos":{"hash":"44d2c199f6c5fcc93b662d6d3aaa02f8e79c9c4b","abbrevHash":"44d2c199","authorName":"Yih-Dar","authorEmail":"2521628+ydshieh@users.noreply.github.com","subject":"Fix smart check (#25955)","url":null}},"13892":{"buildId":13892,"infos":{"hash":"040c4613c2fac59f16e333a630d9a69b6ff9ca5d","abbrevHash":"040c4613","authorName":"David Reguera","authorEmail":"33068707+nablabits@users.noreply.github.com","subject":"Add type hints for tf models final batch (#25883)","url":null}},"13893":{"buildId":13893,"infos":{"hash":"22a69f1d7d520d5fbccbdb163d05db56bf79724c","abbrevHash":"22a69f1d","authorName":"Lysandre Debut","authorEmail":"lysandre.debut@reseau.eseo.fr","subject":"Put Falcon back (#25960)","url":null}},"13894":{"buildId":13894,"infos":{"hash":"49b69fe0d4885e258dbf657e35c445a94ffd09ae","abbrevHash":"49b69fe0","authorName":"Younes Belkada","authorEmail":"49240599+younesbelkada@users.noreply.github.com","subject":"[`Falcon`] Remove SDPA for falcon to support earlier versions of PyTorch (< 2.0) (#25947)","url":null}},"13895":{"buildId":13895,"infos":{"hash":"d8e13b3e04da9e61c6f16df43815656f59688abd","abbrevHash":"d8e13b3e","authorName":"Lysandre","authorEmail":"lysandre@huggingface.co","subject":"v4.34.dev.0","url":null}},"13896":{"buildId":13896,"infos":{"hash":"404ff8fc17599788a546818373be113b1fc8456a","abbrevHash":"404ff8fc","authorName":"Susnato Dhar","authorEmail":"susnatodhar10@gmail.com","subject":"Fix typo 
(#25966)","url":null}},"13897":{"buildId":13897,"infos":{"hash":"feec56959afe480e57b2acc177111ae18a5ea757","abbrevHash":"feec5695","authorName":"Yih-Dar","authorEmail":"2521628+ydshieh@users.noreply.github.com","subject":"Fix Detr CI (#25972)","url":null}},"13898":{"buildId":13898,"infos":{"hash":"fbbe1b8a406a09b47673f606f0af6f3d5e045575","abbrevHash":"fbbe1b8a","authorName":"Yih-Dar","authorEmail":"2521628+ydshieh@users.noreply.github.com","subject":"Fix `test_load_img_url_timeout` (#25976)","url":null}},"13899":{"buildId":13899,"infos":{"hash":"1cc3bc22fed6ffc5937cf66c799dd97840622e69","abbrevHash":"1cc3bc22","authorName":"Huazhong Ji","authorEmail":"hzji210@gmail.com","subject":"nn.Identity is not required to be compatible with PyTorch < 1.1.0 as the minimum PyTorch version we currently support is 1.10.0 (#25974)","url":null}},"13900":{"buildId":13900,"infos":{"hash":"52a46dc57bb653aa9dab440e4bb70988b15cdc7e","abbrevHash":"52a46dc5","authorName":"Susnato Dhar","authorEmail":"susnatodhar10@gmail.com","subject":"Add `Pop2Piano` space demo. (#25975)","url":null}},"13901":{"buildId":13901,"infos":{"hash":"6f125aaa4807d84e9004ce79035c7653aedfd630","abbrevHash":"6f125aaa","authorName":"Kai","authorEmail":"140378742+kai01ai@users.noreply.github.com","subject":"fix typo (#25981)","url":null}},"13902":{"buildId":13902,"infos":{"hash":"391f26459ab1a392aedc82e0546ce5f88acb7cd5","abbrevHash":"391f2645","authorName":"Yih-Dar","authorEmail":"2521628+ydshieh@users.noreply.github.com","subject":"Use main in conversion script (#25973)","url":null}},"13903":{"buildId":13903,"infos":{"hash":"6316ce8d2703f210b91853aba90d44755a241334","abbrevHash":"6316ce8d","authorName":"Julien Chaumond","authorEmail":"julien@huggingface.co","subject":"[doc] Always call it Agents for consistency (#25958)","url":null}},"13904":{"buildId":13904,"infos":{"hash":"7011cd8667d7a51bd608e6a722f061d5ac5f4166","abbrevHash":"7011cd86","authorName":"Traun Leyden","authorEmail":"traun.leyden@gmail.com","subject":"Update RAG README.md with correct path to examples/seq2seq (#25953)","url":null}},"13905":{"buildId":13905,"infos":{"hash":"aea761499f4b1193f2706f471442da6f9df65d65","abbrevHash":"aea76149","authorName":"Sahel Sharify","authorEmail":"sahel.sharifi@gmail.com","subject":"Update training_args.py to remove the runtime error (#25920)","url":null}},"13906":{"buildId":13906,"infos":{"hash":"9a70d6e56f2801c9a3aa80ca97e6a32024db72b7","abbrevHash":"9a70d6e5","authorName":"Joao Gante","authorEmail":"joaofranciscocardosogante@gmail.com","subject":"Trainer: delegate default generation values to `generation_config` (#25987)","url":null}},"13907":{"buildId":13907,"infos":{"hash":"aa5c94d38deb3960e809b75bc959dc4357d3dd2b","abbrevHash":"aa5c94d3","authorName":"Yih-Dar","authorEmail":"2521628+ydshieh@users.noreply.github.com","subject":"Show failed tests on CircleCI layout in a better way (#25895)","url":null}},"13908":{"buildId":13908,"infos":{"hash":"70a98024b1b0007d2d8bdced854cd9b638dbb07b","abbrevHash":"70a98024","authorName":"Abhilash Majumder","authorEmail":"30946547+abhilash1910@users.noreply.github.com","subject":"Patch with accelerate xpu (#25714)","url":null}},"13909":{"buildId":13909,"infos":{"hash":"da1af21dbbc48ad4f6f0b27635cd3993ddc22b55","abbrevHash":"da1af21d","authorName":"andreeahedes","authorEmail":"53334746+andreeahedes@users.noreply.github.com","subject":"PegasusX add _no_split_modules 
(#25933)","url":null}},"13910":{"buildId":13910,"infos":{"hash":"1110b565d62e56105c8e5e4e2848bfbf469f8200","abbrevHash":"1110b565","authorName":"raghavanone","authorEmail":"115454562+raghavanone@users.noreply.github.com","subject":"Add TFDebertaV2ForMultipleChoice (#25932)","url":null}},"13911":{"buildId":13911,"infos":{"hash":"6bc517ccd4a3bcda4d0621d54a37c3e047df223a","abbrevHash":"6bc517cc","authorName":"Sourab Mangrulkar","authorEmail":"13534540+pacman100@users.noreply.github.com","subject":"deepspeed resume from ckpt fixes and adding support for deepspeed optimizer and HF scheduler (#25863)","url":null}},"13912":{"buildId":13912,"infos":{"hash":"8d518013efbd10c178dd0dba0f9ba93229e2e78a","abbrevHash":"8d518013","authorName":"Sanchit Gandhi","authorEmail":"93869735+sanchit-gandhi@users.noreply.github.com","subject":"[Wav2Vec2 Conformer] Fix inference float16 (#25985)","url":null}},"13913":{"buildId":13913,"infos":{"hash":"6206f599e1f45b619f72f9d194929e545549416f","abbrevHash":"6206f599","authorName":"Injin Paek","authorEmail":"71638597+eenzeenee@users.noreply.github.com","subject":"Add LLaMA resources (#25859)","url":null}},"13914":{"buildId":13914,"infos":{"hash":"d0354e5e86842b757cec1ecb7de314a1f2421c1e","abbrevHash":"d0354e5e","authorName":"Arthur","authorEmail":"48595927+ArthurZucker@users.noreply.github.com","subject":"[`CI`] Fix red CI and ERROR failed should show (#25995)","url":null}},"13915":{"buildId":13915,"infos":{"hash":"4fa0aff21ee083d0197a898cdf17ff476fae2ac3","abbrevHash":"4fa0aff2","authorName":"Arthur","authorEmail":"48595927+ArthurZucker@users.noreply.github.com","subject":"[`VITS`] tokenizer integration test: fix revision did not exist (#25996)","url":null}},"13916":{"buildId":13916,"infos":{"hash":"b8def689346c45958268ec389ee6242bddc6d78c","abbrevHash":"b8def689","authorName":"Tanay Mehta","authorEmail":"heyytanay@gmail.com","subject":"Fix Mega chunking error when using decoder-only model (#25765)","url":null}},"13917":{"buildId":13917,"infos":{"hash":"172f42c512e1bf32554ef910fe82f07916b4d4af","abbrevHash":"172f42c5","authorName":"tju_skywalker","authorEmail":"929019882@qq.com","subject":"save space when converting hf model to megatron model. 
(#25950)","url":null}},"13918":{"buildId":13918,"infos":{"hash":"f6295c6c535c2b036a4533327ab5a92c6b199b78","abbrevHash":"f6295c6c","authorName":"Nino Risteski","authorEmail":"95188570+NinoRisteski@users.noreply.github.com","subject":"Update README.md (#26003)","url":null}},"13919":{"buildId":13919,"infos":{"hash":"f6301b9a13b8467d1f88a6f419d76aefa15bd9b8","abbrevHash":"f6301b9a","authorName":"Lysandre Debut","authorEmail":"lysandre.debut@reseau.eseo.fr","subject":"Falcon: fix revision propagation (#26006)","url":null}},"13920":{"buildId":13920,"infos":{"hash":"842e99f1b9ee2a0fa239997ef695c5ed0bd77195","abbrevHash":"842e99f1","authorName":"Matt","authorEmail":"Rocketknight1@users.noreply.github.com","subject":"TF-OPT attention mask fixes (#25238)","url":null}},"13921":{"buildId":13921,"infos":{"hash":"3e203f92bed937fa13c35adee1bdc45a92d18e61","abbrevHash":"3e203f92","authorName":"zspo","authorEmail":"songpo.zhang@foxmail.com","subject":"Fix small typo README.md (#25934)","url":null}},"13922":{"buildId":13922,"infos":{"hash":"fa522d8d7ba512d1e103f891263602ee3f2bd46d","abbrevHash":"fa522d8d","authorName":"Harheem Kim","authorEmail":"49297157+harheem@users.noreply.github.com","subject":"🌐[i18n-KO] Translated `llm_tutorial.md` to Korean (#25791)","url":null}},"13923":{"buildId":13923,"infos":{"hash":"300d6a4a62aac89b3f439110561d5a2268ffad9e","abbrevHash":"300d6a4a","authorName":"Matt","authorEmail":"Rocketknight1@users.noreply.github.com","subject":"Remove Falcon from undocumented list (#26008)","url":null}},"13924":{"buildId":13924,"infos":{"hash":"fa6107c97edf7cf725305a34735a57875b67d85e","abbrevHash":"fa6107c9","authorName":"Marc Sun","authorEmail":"57196510+SunMarc@users.noreply.github.com","subject":"modify context length for GPTQ + version bump (#25899)","url":null}},"13925":{"buildId":13925,"infos":{"hash":"e3a9716384146b89f21a39bdf13dd4b1cac740bb","abbrevHash":"e3a97163","authorName":"Zach Mueller","authorEmail":"muellerzr@gmail.com","subject":"Fix err with FSDP (#25991)","url":null}},"14018":{"buildId":14018,"infos":{"hash":"f29fe7458953dbf00addaf793d95ea1965bc8441","abbrevHash":"f29fe745","authorName":"Matt","authorEmail":"Rocketknight1@users.noreply.github.com","subject":"Rewrite for custom code warning messages (#26291)","url":null}},"14019":{"buildId":14019,"infos":{"hash":"245532065d3ceddf1c0f8cb3e60ab6451861100a","abbrevHash":"24553206","authorName":"fxmarty","authorEmail":"9808326+fxmarty@users.noreply.github.com","subject":"fix deepspeed available detection (#26252)","url":null}},"14020":{"buildId":14020,"infos":{"hash":"00247ea0dec9b2219a43973a2d90c059dfa1df17","abbrevHash":"00247ea0","authorName":"Jinho Park","authorEmail":"jinhoparkseoul@gmail.com","subject":"add bbox input validation (#26294)","url":null}},"14021":{"buildId":14021,"infos":{"hash":"f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609","abbrevHash":"f94c9b3d","authorName":"Arthur","authorEmail":"48595927+ArthurZucker@users.noreply.github.com","subject":"include changes from llama (#26260)","url":null}},"14022":{"buildId":14022,"infos":{"hash":"0b5024ce725a0f6b6d8cfe740e7a2a6021257c37","abbrevHash":"0b5024ce","authorName":"Younes Belkada","authorEmail":"49240599+younesbelkada@users.noreply.github.com","subject":"[`Trainer`] Refactor trainer + bnb logic (#26248)","url":null}},"14023":{"buildId":14023,"infos":{"hash":"e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d","abbrevHash":"e3a4bd2b","authorName":"Shijie Wu","authorEmail":"swu671@bloomberg.net","subject":"add custom RMSNorm to `ALL_LAYERNORM_LAYERS` (#26227)","url":null}}} \ 
No newline at end of file diff --git a/dana/configs/db/Inference/series/llama_1gpu/_0_forward/_latency_s/_.json b/dana/configs/db/Inference/series/llama_1gpu/_0_forward/_latency_s/_.json deleted file mode 100644 index 3d4eac6a9ac64f5ae49ff2d3a39dafcada62c7cf..0000000000000000000000000000000000000000 --- a/dana/configs/db/Inference/series/llama_1gpu/_0_forward/_latency_s/_.json +++ /dev/null @@ -1 +0,0 @@ -{"projectId":"Inference","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"14018":0.00258,"14019":0.00229,"14020":0.00303,"14021":0.00296,"14022":0.00261,"14023":0.00292},"state":{"analyse":"similarNeedstriage","compares":{}},"lastBuildId":"14023","analyseResult":{"summary":{"error":"Unable to find first average","lastBuildId":14023}}} \ No newline at end of file diff --git a/dana/configs/db/Inference/series/llama_1gpu/_0_forward/_throughpu/t_samples_/s_.json b/dana/configs/db/Inference/series/llama_1gpu/_0_forward/_throughpu/t_samples_/s_.json deleted file mode 100644 index f719a0b91d2631632ad8af71d1adfec73e6aebae..0000000000000000000000000000000000000000 --- a/dana/configs/db/Inference/series/llama_1gpu/_0_forward/_throughpu/t_samples_/s_.json +++ /dev/null @@ -1 +0,0 @@ -{"projectId":"Inference","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"14018":388,"14019":437,"14020":330,"14021":338,"14022":383,"14023":342},"state":{"analyse":"similarNeedstriage","compares":{}},"lastBuildId":"14023","analyseResult":{"summary":{"error":"Unable to find first average","lastBuildId":14023}}} \ No newline at end of file diff --git a/dana/configs/db/Inference/series/llama_1gpu/_0_generat/e_latency_/s_.json b/dana/configs/db/Inference/series/llama_1gpu/_0_generat/e_latency_/s_.json deleted file mode 100644 index 7aac6f08d1d1c2cef9914e262ce533b239b37f4e..0000000000000000000000000000000000000000 --- a/dana/configs/db/Inference/series/llama_1gpu/_0_generat/e_latency_/s_.json +++ /dev/null @@ -1 +0,0 @@ -{"projectId":"Inference","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"14018":0.538,"14019":0.483,"14020":0.625,"14021":0.614,"14022":0.545,"14023":0.622},"state":{"analyse":"similarNeedstriage","compares":{}},"lastBuildId":"14023","analyseResult":{"summary":{"error":"Unable to find first average","lastBuildId":14023}}} \ No newline at end of file diff --git a/dana/configs/db/Inference/series/llama_1gpu/_0_generat/e_throughp/ut_tokens_/s_.json b/dana/configs/db/Inference/series/llama_1gpu/_0_generat/e_throughp/ut_tokens_/s_.json deleted file mode 100644 index 1c43a2b2a575190108f6cc66be88995bfa0c2bec..0000000000000000000000000000000000000000 --- a/dana/configs/db/Inference/series/llama_1gpu/_0_generat/e_throughp/ut_tokens_/s_.json +++ /dev/null @@ -1 +0,0 @@ -{"projectId":"Inference","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"14018":372,"14019":414,"14020":320,"14021":326,"14022":367,"14023":322},"state":{"analyse":"similarNeedstriage","compares":{}},"lastBuildId":"14023","analyseResult":{"summary":{"error":"Unable to find first average","lastBuildId":14023}}} \ No newline at end of file diff --git a/dana/configs/db/Inference/series/llama_1gpu/_1_forward/_latency_s/_.json b/dana/configs/db/Inference/series/llama_1gpu/_1_forward/_latency_s/_.json deleted file mode 100644 index f795d8d7982df7d0cb541861230975342166048c..0000000000000000000000000000000000000000 
--- a/dana/configs/db/Inference/series/llama_1gpu/_1_forward/_latency_s/_.json +++ /dev/null @@ -1 +0,0 @@ -{"projectId":"Inference","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"14018":0.0033,"14019":0.00319,"14020":0.00327,"14021":0.00324,"14022":0.00319,"14023":0.00315},"state":{"analyse":"similarNeedstriage","compares":{}},"lastBuildId":"14023","analyseResult":{"details":{"analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"aRange":{"up":10,"upIsValue":false,"down":-10,"downIsValue":false},"indexStart":0,"first":0},"summary":{"lastBuildId":14023,"status":"similar","current":{"average":0.0032233333333333333,"ratio":0,"diff":0},"base":{"average":0.0032233333333333333}},"averages":[{"start":0,"end":5,"length":6,"average":0.0032233333333333333,"ratio":0,"diff":0,"status":"similar"}]}} \ No newline at end of file diff --git a/dana/configs/db/Inference/series/llama_1gpu/_1_forward/_throughpu/t_samples_/s_.json b/dana/configs/db/Inference/series/llama_1gpu/_1_forward/_throughpu/t_samples_/s_.json deleted file mode 100644 index e929c02332d1d4c3af8457b673c36d1035e74e1b..0000000000000000000000000000000000000000 --- a/dana/configs/db/Inference/series/llama_1gpu/_1_forward/_throughpu/t_samples_/s_.json +++ /dev/null @@ -1 +0,0 @@ -{"projectId":"Inference","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"14018":303,"14019":313,"14020":306,"14021":309,"14022":313,"14023":317},"state":{"analyse":"similarNeedstriage","compares":{}},"lastBuildId":"14023","analyseResult":{"details":{"analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"aRange":{"up":10,"upIsValue":false,"down":-10,"downIsValue":false},"indexStart":0,"first":0},"summary":{"lastBuildId":14023,"status":"similar","current":{"average":310.1666666666667,"ratio":0,"diff":0},"base":{"average":310.1666666666667}},"averages":[{"start":0,"end":5,"length":6,"average":310.1666666666667,"ratio":0,"diff":0,"status":"similar"}]}} \ No newline at end of file diff --git a/dana/configs/db/Inference/series/llama_1gpu/_1_generat/e_latency_/s_.json b/dana/configs/db/Inference/series/llama_1gpu/_1_generat/e_latency_/s_.json deleted file mode 100644 index 79cf51632e58f493eea81d7ee8508aab5836cca8..0000000000000000000000000000000000000000 --- a/dana/configs/db/Inference/series/llama_1gpu/_1_generat/e_latency_/s_.json +++ /dev/null @@ -1 +0,0 @@ -{"projectId":"Inference","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"14018":0.517,"14019":0.52,"14020":0.51,"14021":0.504,"14022":0.518,"14023":0.503},"state":{"analyse":"similarNeedstriage","compares":{}},"lastBuildId":"14023","analyseResult":{"details":{"analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"aRange":{"up":10,"upIsValue":false,"down":-10,"downIsValue":false},"indexStart":0,"first":0},"summary":{"lastBuildId":14023,"status":"similar","current":{"average":0.512,"ratio":0,"diff":0},"base":{"average":0.512}},"averages":[{"start":0,"end":5,"length":6,"average":0.512,"ratio":0,"diff":0,"status":"similar"}]}} \ No newline at end of file diff --git a/dana/configs/db/Inference/series/llama_1gpu/_1_generat/e_throughp/ut_tokens_/s_.json b/dana/configs/db/Inference/series/llama_1gpu/_1_generat/e_throughp/ut_tokens_/s_.json deleted file mode 100644 index dcb0c7d73b8bc67d455db2cde41bc9c0ba64b2c9..0000000000000000000000000000000000000000 --- 
a/dana/configs/db/Inference/series/llama_1gpu/_1_generat/e_throughp/ut_tokens_/s_.json +++ /dev/null @@ -1 +0,0 @@ -{"projectId":"Inference","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"14018":387,"14019":385,"14020":392,"14021":397,"14022":386,"14023":398},"state":{"analyse":"similarNeedstriage","compares":{}},"lastBuildId":"14023","analyseResult":{"details":{"analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"aRange":{"up":10,"upIsValue":false,"down":-10,"downIsValue":false},"indexStart":0,"first":0},"summary":{"lastBuildId":14023,"status":"similar","current":{"average":390.8333333333333,"ratio":0,"diff":0},"base":{"average":390.8333333333333}},"averages":[{"start":0,"end":5,"length":6,"average":390.8333333333333,"ratio":0,"diff":0,"status":"similar"}]}} \ No newline at end of file diff --git a/dana/configs/db/Inference/series/llama_1gpu/_2_forward/_latency_s/_.json b/dana/configs/db/Inference/series/llama_1gpu/_2_forward/_latency_s/_.json deleted file mode 100644 index b587f05628e547272f85b53a3751e9de5534261a..0000000000000000000000000000000000000000 --- a/dana/configs/db/Inference/series/llama_1gpu/_2_forward/_latency_s/_.json +++ /dev/null @@ -1 +0,0 @@ -{"projectId":"Inference","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"14018":0.00568,"14019":0.00531,"14020":0.00557,"14021":0.0057,"14022":0.00395,"14023":0.00393},"state":{"analyse":"similarNeedstriage","compares":{}},"lastBuildId":"14023","analyseResult":{"summary":{"error":"Unable to find first average","lastBuildId":14023}}} \ No newline at end of file diff --git a/dana/configs/db/Inference/series/llama_1gpu/_2_forward/_throughpu/t_samples_/s_.json b/dana/configs/db/Inference/series/llama_1gpu/_2_forward/_throughpu/t_samples_/s_.json deleted file mode 100644 index 2e07ec6a1b08952e010936ff55da2da9de89152a..0000000000000000000000000000000000000000 --- a/dana/configs/db/Inference/series/llama_1gpu/_2_forward/_throughpu/t_samples_/s_.json +++ /dev/null @@ -1 +0,0 @@ -{"projectId":"Inference","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"14018":2820,"14019":3010,"14020":2870,"14021":2810,"14022":4050,"14023":4070},"state":{"analyse":"similarNeedstriage","compares":{}},"lastBuildId":"14023","analyseResult":{"summary":{"error":"Unable to find first average","lastBuildId":14023}}} \ No newline at end of file diff --git a/dana/configs/db/Inference/series/llama_1gpu/_2_generat/e_latency_/s_.json b/dana/configs/db/Inference/series/llama_1gpu/_2_generat/e_latency_/s_.json deleted file mode 100644 index 82f6e737778b5dd670956328d4a342508cf3dc03..0000000000000000000000000000000000000000 --- a/dana/configs/db/Inference/series/llama_1gpu/_2_generat/e_latency_/s_.json +++ /dev/null @@ -1 +0,0 @@ -{"projectId":"Inference","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"14018":0.905,"14019":0.879,"14020":0.907,"14021":0.894,"14022":0.63,"14023":0.554},"state":{"analyse":"similarNeedstriage","compares":{}},"lastBuildId":"14023","analyseResult":{"summary":{"error":"Unable to find first average","lastBuildId":14023}}} \ No newline at end of file diff --git a/dana/configs/db/Inference/series/llama_1gpu/_2_generat/e_throughp/ut_tokens_/s_.json b/dana/configs/db/Inference/series/llama_1gpu/_2_generat/e_throughp/ut_tokens_/s_.json deleted file mode 100644 index 
e91c67cc3a08fa5c80afcd3de5469ead37815b3f..0000000000000000000000000000000000000000 --- a/dana/configs/db/Inference/series/llama_1gpu/_2_generat/e_throughp/ut_tokens_/s_.json +++ /dev/null @@ -1 +0,0 @@ -{"projectId":"Inference","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"14018":3540,"14019":3640,"14020":3530,"14021":3580,"14022":5080,"14023":5780},"state":{"analyse":"similarNeedstriage","compares":{}},"lastBuildId":"14023","analyseResult":{"summary":{"error":"Unable to find first average","lastBuildId":14023}}} \ No newline at end of file diff --git a/dana/configs/db/Inference/series/llama_1gpu/_3_forward/_latency_s/_.json b/dana/configs/db/Inference/series/llama_1gpu/_3_forward/_latency_s/_.json deleted file mode 100644 index 20178c751b99243b17c843a78eef4a0be30e7425..0000000000000000000000000000000000000000 --- a/dana/configs/db/Inference/series/llama_1gpu/_3_forward/_latency_s/_.json +++ /dev/null @@ -1 +0,0 @@ -{"projectId":"Inference","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"14018":0.00608,"14019":0.00398,"14020":0.00604,"14021":0.00606,"14022":0.00451,"14023":0.00521},"state":{"analyse":"similarNeedstriage","compares":{}},"lastBuildId":"14023","analyseResult":{"summary":{"error":"Unable to find first average","lastBuildId":14023}}} \ No newline at end of file diff --git a/dana/configs/db/Inference/series/llama_1gpu/_3_forward/_throughpu/t_samples_/s_.json b/dana/configs/db/Inference/series/llama_1gpu/_3_forward/_throughpu/t_samples_/s_.json deleted file mode 100644 index b505bccb9250d14d5156ff92ae4aaec4215cc6ea..0000000000000000000000000000000000000000 --- a/dana/configs/db/Inference/series/llama_1gpu/_3_forward/_throughpu/t_samples_/s_.json +++ /dev/null @@ -1 +0,0 @@ -{"projectId":"Inference","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"14018":2630,"14019":4020,"14020":2650,"14021":2640,"14022":3550,"14023":3070},"state":{"analyse":"similarNeedstriage","compares":{}},"lastBuildId":"14023","analyseResult":{"summary":{"error":"Unable to find first average","lastBuildId":14023}}} \ No newline at end of file diff --git a/dana/configs/db/Inference/series/llama_1gpu/_3_generat/e_latency_/s_.json b/dana/configs/db/Inference/series/llama_1gpu/_3_generat/e_latency_/s_.json deleted file mode 100644 index 7d60fc23d9e70d1dd8191d42620709e648f795a9..0000000000000000000000000000000000000000 --- a/dana/configs/db/Inference/series/llama_1gpu/_3_generat/e_latency_/s_.json +++ /dev/null @@ -1 +0,0 @@ -{"projectId":"Inference","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"14018":0.784,"14019":0.543,"14020":0.79,"14021":0.78,"14022":0.594,"14023":0.665},"state":{"analyse":"similarNeedstriage","compares":{}},"lastBuildId":"14023","analyseResult":{"summary":{"error":"Unable to find first average","lastBuildId":14023}}} \ No newline at end of file diff --git a/dana/configs/db/Inference/series/llama_1gpu/_3_generat/e_throughp/ut_tokens_/s_.json b/dana/configs/db/Inference/series/llama_1gpu/_3_generat/e_throughp/ut_tokens_/s_.json deleted file mode 100644 index 9bf74a7c3ad7afb547b96ac856c6273adc95ea22..0000000000000000000000000000000000000000 --- a/dana/configs/db/Inference/series/llama_1gpu/_3_generat/e_throughp/ut_tokens_/s_.json +++ /dev/null @@ -1 +0,0 @@ 
-{"projectId":"Inference","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"14018":4080,"14019":5890,"14020":4050,"14021":4100,"14022":5390,"14023":4810},"state":{"analyse":"similarNeedstriage","compares":{}},"lastBuildId":"14023","analyseResult":{"summary":{"error":"Unable to find first average","lastBuildId":14023}}} \ No newline at end of file diff --git a/dana/configs/db/Inference/series/pytorch_be/rt_0_forwa/rd_latency/_s_.json b/dana/configs/db/Inference/series/pytorch_be/rt_0_forwa/rd_latency/_s_.json deleted file mode 100644 index 7a06033d707516418c786ff0f67979c786244886..0000000000000000000000000000000000000000 --- a/dana/configs/db/Inference/series/pytorch_be/rt_0_forwa/rd_latency/_s_.json +++ /dev/null @@ -1 +0,0 @@ -{"projectId":"Inference","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"13713":0.00331,"13714":0.00456,"13715":0.00394,"13716":0.00324,"13717":0.00374,"13718":0.00321,"13719":0.00375,"13720":0.00372,"13721":0.00321,"13722":0.0032,"13723":0.0031,"13724":0.00377,"13725":0.0032,"13726":0.00367,"13727":0.00358,"13728":0.00375,"13729":0.00314,"13730":0.00316,"13731":0.00323,"13732":0.00305,"13733":0.00379,"13734":0.0039,"13735":0.00364,"13736":0.0036,"13737":0.00364,"13738":0.00323,"13739":0.00358,"13740":0.00361,"13741":0.00307,"13742":0.00381,"13743":0.0031,"13744":0.00314,"13745":0.00361,"13746":0.00333,"13747":0.00334,"13748":0.00373,"13749":0.00338,"13750":0.00367,"13752":0.00319,"13753":0.00321,"13754":0.00316,"13755":0.0032,"13756":0.00317,"13757":0.00398,"13758":0.00317,"13759":0.00333,"13760":0.00377,"13761":0.00306,"13762":0.00316,"13763":0.00317,"13764":0.00377,"13765":0.00375,"13766":0.00332,"13767":0.0032,"13768":0.00315,"13769":0.00318,"13770":0.00316,"13771":0.00318,"13772":0.00318,"13773":0.00379,"13774":0.00411,"13775":0.00308,"13776":0.00312,"13777":0.00322,"13778":0.0038,"13779":0.0032,"13780":0.00331,"13781":0.00307,"13782":0.00363,"13783":0.00378,"13784":0.00306,"13785":0.00306,"13786":0.00327,"13787":0.00367,"13788":0.00312,"13789":0.00306,"13790":0.00379,"13791":0.00315,"13792":0.00319,"13793":0.00349,"13794":0.00312,"13795":0.00384,"13796":0.00359,"13797":0.00322,"13798":0.00322,"13799":0.00378,"13800":0.00366,"13801":0.00369,"13802":0.00365,"13803":0.00357,"13804":0.00365,"13805":0.00316,"13806":0.00573,"13807":0.00358,"13808":0.00339,"13809":0.00313,"13810":0.0034,"13811":0.00308,"13812":0.00305,"13813":0.00313,"13814":0.00372,"13815":0.00362,"13816":0.00316,"13817":0.00385,"13818":0.00313,"13819":0.0031,"13820":0.00355,"13821":0.00401,"13822":0.00377,"13823":0.00316,"13824":0.00365,"13825":0.00358,"13826":0.0038,"13827":0.00366,"13828":0.00332,"13829":0.00304,"13830":0.00322,"13831":0.00308,"13832":0.00383,"13833":0.0038,"13834":0.0031,"13835":0.00356,"13836":0.00399,"13837":0.0037,"13838":0.00317,"13839":0.00357,"13840":0.00362,"13841":0.00369,"13842":0.00322,"13843":0.00363,"13844":0.0031,"13845":0.00306,"13846":0.00315,"13847":0.00313,"13848":0.00368,"13849":0.00371,"13850":0.00388,"13851":0.0038,"13852":0.00314,"13853":0.00324,"13854":0.00365,"13855":0.00506,"13856":0.00364,"13857":0.00392,"13858":0.00314,"13859":0.0035,"13860":0.0042,"13861":0.00374,"13862":0.00322,"13863":0.00325,"13864":0.00336,"13865":0.00607,"13866":0.00588,"13867":0.00598,"13868":0.00602,"13869":0.00603,"13870":0.0066,"13871":0.00775,"13872":0.00618,"13873":0.00654,"13874":0.00623,"13875":0.00697,"13876":0.00698,"13877":0.00684,"13878":0.00698,"138
79":0.0064,"13880":0.00692,"13881":0.00706,"13882":0.006,"13883":0.00676,"13884":0.00687,"13885":0.00593,"13886":0.0059,"13887":0.00697,"13888":0.00583,"13889":0.00596,"13890":0.00622,"13891":0.00326,"13892":0.00357,"13893":0.00318,"13894":0.0037,"13895":0.00364,"13896":0.00361,"13897":0.00327,"13898":0.0033,"13899":0.00339,"13900":0.00328,"13901":0.00289,"13902":0.00304,"13903":0.0035,"13904":0.00333,"13905":0.00351,"13906":0.0034,"13907":0.00309,"13908":0.00327,"13909":0.00323,"13910":0.00308,"13911":0.00363,"13912":0.00314,"13913":0.00315,"13914":0.00378,"13915":0.00319,"13916":0.00384,"13917":0.00353,"13918":0.00349,"13919":0.0036,"13920":0.00334,"13921":0.00327,"13922":0.00386,"13923":0.0032,"13924":0.00316,"13925":0.00318,"14018":0.00368,"14019":0.0037,"14020":0.00395,"14021":0.00359,"14022":0.00372,"14023":0.00355},"state":{"analyse":"similarNeedstriage","compares":{}},"lastBuildId":"14023","analyseResult":{"details":{"analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"aRange":{"up":10,"upIsValue":false,"down":-10,"downIsValue":false},"indexStart":0,"first":0},"summary":{"lastBuildId":14023,"status":"similar","current":{"average":0.0035975,"ratio":-0.5234050957930142,"diff":-0.00001892857142857165},"base":{"average":0.0036164285714285717}},"averages":[{"start":20,"end":37,"length":18,"average":0.0036164285714285717,"ratio":0,"diff":0,"status":"similar"},{"start":38,"end":84,"length":47,"average":0.003171515151515151,"ratio":-12.30256345800492,"diff":-0.0004449134199134208,"status":"improvement"},{"start":85,"end":94,"length":10,"average":0.0036212500000000003,"ratio":0.13332016590954293,"diff":0.000004821428571428685,"status":"similar"},{"start":95,"end":109,"length":15,"average":0.0031488888888888886,"ratio":-12.928215594618928,"diff":-0.0004675396825396831,"status":"improvement"},{"start":110,"end":150,"length":41,"average":0.0036783333333333334,"ratio":1.711765093159518,"diff":0.00006190476190476172,"status":"similar"},{"start":151,"end":160,"length":10,"average":0.006116249999999999,"ratio":69.12403713213507,"diff":0.0024998214285714276,"status":"regression"},{"start":161,"end":180,"length":20,"average":0.006815454545454546,"ratio":88.45815452570342,"diff":0.0031990259740259748,"status":"regression"},{"start":181,"end":201,"length":21,"average":0.0033338888888888884,"ratio":-7.812671450830663,"diff":-0.00028253968253968325,"status":"similar"},{"start":202,"end":217,"length":16,"average":0.0035975,"ratio":-0.5234050957930142,"diff":-0.00001892857142857165,"status":"similar"}]}} \ No newline at end of file diff --git a/dana/configs/db/Inference/series/pytorch_be/rt_0_forwa/rd_peak_me/mory_MB_.json b/dana/configs/db/Inference/series/pytorch_be/rt_0_forwa/rd_peak_me/mory_MB_.json deleted file mode 100644 index 79b73501e596dd3dcdfef11b8448fd1e99851c97..0000000000000000000000000000000000000000 --- a/dana/configs/db/Inference/series/pytorch_be/rt_0_forwa/rd_peak_me/mory_MB_.json +++ /dev/null @@ -1 +0,0 @@ 
-{"projectId":"Inference","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"},"base":13880},"assignee":{"compares":{}},"samples":{"13713":466.690048,"13714":467.267584,"13715":466.673664,"13716":466.71871999999996,"13717":467.05868799999996,"13718":465.825792,"13719":466.059264,"13720":466.41152,"13721":467.140608,"13722":467.873792,"13723":466.980864,"13724":466.706432,"13725":466.849792,"13726":467.61983999999995,"13727":466.98496,"13728":467.18976,"13729":466.39104,"13730":468.97152,"13731":466.075648,"13732":466.88255999999996,"13733":466.804736,"13734":467.324928,"13735":467.603456,"13736":466.84159999999997,"13737":468.262912,"13738":467.083264,"13739":466.980864,"13740":467.06687999999997,"13741":466.980864,"13742":467.132416,"13743":467.18976,"13744":466.911232,"13745":466.93580799999995,"13746":467.21024,"13747":467.410944,"13748":467.369984,"13749":466.94809599999996,"13750":467.214336,"13752":466.804736,"13753":466.649088,"13754":466.526208,"13755":467.73862399999996,"13756":467.31264,"13757":467.06278399999997,"13758":467.030016,"13759":466.972672,"13760":466.976768,"13761":466.15347199999997,"13762":466.43609599999996,"13763":466.403328,"13764":467.18156799999997,"13765":465.87904,"13766":466.0224,"13767":467.542016,"13768":465.903616,"13769":466.55487999999997,"13770":466.57536,"13771":467.06278399999997,"13772":467.030016,"13773":466.54668799999996,"13774":466.54668799999996,"13775":466.6368,"13776":467.39455999999996,"13777":466.66956799999997,"13778":467.263488,"13779":466.83750399999997,"13780":467.00134399999996,"13781":467.132416,"13782":466.80064,"13783":467.611648,"13784":467.156992,"13785":467.43552,"13786":467.06687999999997,"13787":466.898944,"13788":467.34950399999997,"13789":467.50515199999995,"13790":467.7632,"13791":467.28806399999996,"13792":466.86208,"13793":466.620416,"13794":467.218432,"13795":466.51392,"13796":466.919424,"13797":467.357696,"13798":467.304448,"13799":467.29215999999997,"13800":466.804736,"13801":467.75091199999997,"13802":466.86208,"13803":467.59936,"13804":467.202048,"13805":466.644992,"13806":467.255296,"13807":468.19737599999996,"13808":467.06687999999997,"13809":467.378176,"13810":466.94399999999996,"13811":466.54668799999996,"13812":468.43904,"13813":467.18976,"13814":467.042304,"13815":467.144704,"13816":466.66547199999997,"13817":466.915328,"13818":466.02649599999995,"13819":467.042304,"13820":466.36646399999995,"13821":467.202048,"13822":467.492864,"13823":466.86208,"13824":466.60403199999996,"13825":467.02592,"13826":466.808832,"13827":467.079168,"13828":466.522112,"13829":466.989056,"13830":466.419712,"13831":467.075072,"13832":466.567168,"13833":466.735104,"13834":466.989056,"13835":466.72691199999997,"13836":467.05459199999996,"13837":466.747392,"13838":466.853888,"13839":467.00953599999997,"13840":467.1488,"13841":466.812928,"13842":467.45599999999996,"13843":466.90304,"13844":466.83750399999997,"13845":466.857984,"13846":466.587648,"13847":467.243008,"13848":466.857984,"13849":467.030016,"13850":467.06278399999997,"13851":466.874368,"13852":467.492864,"13853":466.898944,"13854":466.755584,"13855":466.31731199999996,"13856":467.62393599999996,"13857":467.06278399999997,"13858":466.78425599999997,"13859":466.54668799999996,"13860":466.65318399999995,"13861":467.095552,"13862":466.78425599999997,"13863":467.214336,"13864":466.415616,"13865":468.434944,"13866":467.95980799999995,"13867":468.53324799999996,"13868":468.795392,"13869":468.103168,"13870":468.36121599999996,"13871":468.840448,"13872":469.143552,"13873"
:468.922368,"13874":468.24243199999995,"13875":468.07859199999996,"13876":469.04115199999995,"13877":468.611072,"13878":468.62336,"13879":469.17632,"13880":468.856832,"13881":468.168704,"13882":468.979712,"13883":468.50048,"13884":468.627456,"13885":468.267008,"13886":468.71347199999997,"13887":468.721664,"13888":468.82406399999996,"13889":468.578304,"13890":468.959232,"13891":467.18156799999997,"13892":467.68947199999997,"13893":467.070976,"13894":466.65727999999996,"13895":466.808832,"13896":468.01715199999995,"13897":466.48115199999995,"13898":467.73862399999996,"13899":466.845696,"13900":466.522112,"13901":466.86208,"13902":466.358272,"13903":466.18624,"13904":466.706432,"13905":466.88255999999996,"13906":466.444288,"13907":466.94399999999996,"13908":466.89075199999996,"13909":466.808832,"13910":466.57536,"13911":466.874368,"13912":466.583552,"13913":466.65318399999995,"13914":466.956288,"13915":466.845696,"13916":466.51392,"13917":467.247104,"13918":466.460672,"13919":466.66547199999997,"13920":467.070976,"13921":466.976768,"13922":466.644992,"13923":466.751488,"13924":467.501056,"13925":466.43199999999996,"14018":553,"14019":554,"14020":554,"14021":553,"14022":554,"14023":554},"state":{"analyse":"regressionNeedstriage","compares":{}},"lastBuildId":"14023","analyseResult":{"details":{"analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"},"base":13880},"aRange":{"up":10,"upIsValue":false,"down":-10,"downIsValue":false},"indexStart":166,"first":0},"summary":{"lastBuildId":14023,"status":"regression","current":{"average":553.6666666666666,"ratio":18.48621536351449,"diff":86.38305484057952},"base":{"average":467.2836118260871}},"averages":[{"start":166,"end":211,"length":46,"average":467.2836118260871,"ratio":0,"diff":0,"status":"similar"},{"start":212,"end":217,"length":6,"average":553.6666666666666,"ratio":18.48621536351449,"diff":86.38305484057952,"status":"regression"}]}} \ No newline at end of file diff --git a/dana/configs/db/Inference/series/pytorch_be/rt_0_forwa/rd_through/put_sample/s_s_.json b/dana/configs/db/Inference/series/pytorch_be/rt_0_forwa/rd_through/put_sample/s_s_.json deleted file mode 100644 index 6558327971d7289339dea973cf721fa57e21ff29..0000000000000000000000000000000000000000 --- a/dana/configs/db/Inference/series/pytorch_be/rt_0_forwa/rd_through/put_sample/s_s_.json +++ /dev/null @@ -1 +0,0 @@ 
-{"projectId":"Inference","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"13713":302,"13714":219,"13715":254,"13716":309,"13717":267,"13718":312,"13719":267,"13720":269,"13721":312,"13722":312,"13723":323,"13724":265,"13725":312,"13726":272,"13727":279,"13728":267,"13729":318,"13730":316,"13731":310,"13732":328,"13733":264,"13734":256,"13735":275,"13736":278,"13737":275,"13738":310,"13739":279,"13740":277,"13741":326,"13742":262,"13743":323,"13744":318,"13745":277,"13746":300,"13747":299,"13748":268,"13749":296,"13750":272,"13752":313,"13753":312,"13754":316,"13755":312,"13756":315,"13757":251,"13758":315,"13759":300,"13760":265,"13761":327,"13762":316,"13763":315,"13764":265,"13765":267,"13766":301,"13767":312,"13768":317,"13769":314,"13770":316,"13771":314,"13772":314,"13773":264,"13774":243,"13775":325,"13776":321,"13777":311,"13778":263,"13779":312,"13780":302,"13781":326,"13782":275,"13783":265,"13784":327,"13785":327,"13786":306,"13787":272,"13788":321,"13789":327,"13790":264,"13791":317,"13792":313,"13793":287,"13794":321,"13795":260,"13796":279,"13797":311,"13798":311,"13799":265,"13800":273,"13801":271,"13802":274,"13803":280,"13804":274,"13805":316,"13806":175,"13807":279,"13808":295,"13809":319,"13810":294,"13811":325,"13812":328,"13813":319,"13814":269,"13815":276,"13816":316,"13817":260,"13818":319,"13819":323,"13820":282,"13821":249,"13822":265,"13823":316,"13824":274,"13825":279,"13826":263,"13827":273,"13828":301,"13829":329,"13830":311,"13831":325,"13832":261,"13833":263,"13834":323,"13835":281,"13836":251,"13837":270,"13838":315,"13839":280,"13840":276,"13841":271,"13842":311,"13843":275,"13844":323,"13845":327,"13846":317,"13847":319,"13848":272,"13849":270,"13850":258,"13851":263,"13852":318,"13853":309,"13854":274,"13855":198,"13856":275,"13857":255,"13858":318,"13859":286,"13860":238,"13861":267,"13862":311,"13863":308,"13864":298,"13865":165,"13866":170,"13867":167,"13868":166,"13869":166,"13870":152,"13871":129,"13872":162,"13873":153,"13874":161,"13875":143,"13876":143,"13877":146,"13878":143,"13879":156,"13880":145,"13881":142,"13882":167,"13883":148,"13884":146,"13885":169,"13886":169,"13887":143,"13888":172,"13889":168,"13890":161,"13891":307,"13892":280,"13893":314,"13894":270,"13895":275,"13896":277,"13897":306,"13898":303,"13899":295,"13900":305,"13901":346,"13902":329,"13903":286,"13904":300,"13905":285,"13906":294,"13907":324,"13908":306,"13909":310,"13910":325,"13911":275,"13912":318,"13913":317,"13914":265,"13915":313,"13916":260,"13917":283,"13918":287,"13919":278,"13920":299,"13921":306,"13922":259,"13923":312,"13924":316,"13925":314,"14018":1090,"14019":1080,"14020":1010,"14021":1110,"14022":1080,"14023":1130},"state":{"analyse":"regressionNeedstriage","compares":{}},"lastBuildId":"14023","analyseResult":{"details":{"analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"aRange":{"up":10,"upIsValue":false,"down":-10,"downIsValue":false},"indexStart":0,"first":0},"summary":{"lastBuildId":14023,"status":"regression","current":{"average":1083.3333333333333,"ratio":296.46233607807255,"diff":810.0833333333333},"base":{"average":273.25}},"averages":[{"start":20,"end":37,"length":18,"average":273.25,"ratio":0,"diff":0,"status":"similar"},{"start":38,"end":84,"length":47,"average":314.52941176470586,"ratio":15.106829557074422,"diff":41.279411764705856,"status":"regression"},{"start":85,"end":94,"length":10,"average":276.375,"ratio":1.1436413540713632,"diff":3.125,"status":"similar"},{
"start":95,"end":150,"length":56,"average":315.1111111111111,"ratio":15.319711294093718,"diff":41.861111111111086,"status":"regression"},{"start":151,"end":160,"length":10,"average":162.44444444444446,"ratio":-40.550980990139266,"diff":-110.80555555555554,"status":"improvement"},{"start":161,"end":189,"length":29,"average":145.5,"ratio":-46.752058554437326,"diff":-127.75,"status":"improvement"},{"start":190,"end":201,"length":12,"average":304.3,"ratio":11.363220494053069,"diff":31.05000000000001,"status":"regression"},{"start":202,"end":211,"length":10,"average":281.7142857142857,"ratio":3.097634296170438,"diff":8.464285714285722,"status":"similar"},{"start":212,"end":217,"length":6,"average":1083.3333333333333,"ratio":296.46233607807255,"diff":810.0833333333333,"status":"regression"}]}} \ No newline at end of file diff --git a/dana/configs/db/Inference/series/pytorch_be/rt_1_forwa/rd_latency/_s_.json b/dana/configs/db/Inference/series/pytorch_be/rt_1_forwa/rd_latency/_s_.json deleted file mode 100644 index d262baf0e6e7ab2d7c90aeaba0ac18fc9e44208e..0000000000000000000000000000000000000000 --- a/dana/configs/db/Inference/series/pytorch_be/rt_1_forwa/rd_latency/_s_.json +++ /dev/null @@ -1 +0,0 @@ -{"projectId":"Inference","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"},"base":13721},"assignee":{"compares":{}},"samples":{"13713":0.00343,"13714":0.00376,"13715":0.00343,"13716":0.00353,"13717":0.00378,"13718":0.00354,"13719":0.00433,"13720":0.0041,"13721":0.00363,"13722":0.00355,"13723":0.00342,"13724":0.00428,"13725":0.00361,"13726":0.00413,"13727":0.00347,"13728":0.00426,"13729":0.00348,"13730":0.00339,"13731":0.00378,"13732":0.00338,"13733":0.00427,"13734":0.00339,"13735":0.00412,"13736":0.00416,"13737":0.00336,"13738":0.00357,"13739":0.00409,"13740":0.00422,"13741":0.00346,"13742":0.00358,"13743":0.0034,"13744":0.00348,"13745":0.00348,"13746":0.00352,"13747":0.00435,"13748":0.0043,"13749":0.00423,"13750":0.0041,"13752":0.00346,"13753":0.00355,"13754":0.00353,"13755":0.00349,"13756":0.00349,"13757":0.00435,"13758":0.00352,"13759":0.00343,"13760":0.00424,"13761":0.0034,"13762":0.00349,"13763":0.00344,"13764":0.0043,"13765":0.00425,"13766":0.00369,"13767":0.00509,"13768":0.00392,"13769":0.00369,"13770":0.00356,"13771":0.00355,"13772":0.00348,"13773":0.00429,"13774":0.00356,"13775":0.00343,"13776":0.00345,"13777":0.00353,"13778":0.00436,"13779":0.00422,"13780":0.00342,"13781":0.0034,"13782":0.00419,"13783":0.00422,"13784":0.00341,"13785":0.0034,"13786":0.00433,"13787":0.0041,"13788":0.00344,"13789":0.00342,"13790":0.00434,"13791":0.0036,"13792":0.00345,"13793":0.00351,"13794":0.00337,"13795":0.00451,"13796":0.00354,"13797":0.00356,"13798":0.00358,"13799":0.00432,"13800":0.00346,"13801":0.00347,"13802":0.0036,"13803":0.00446,"13804":0.00413,"13805":0.00359,"13806":0.00381,"13807":0.00395,"13808":0.00357,"13809":0.00487,"13810":0.00359,"13811":0.00339,"13812":0.00344,"13813":0.00347,"13814":0.0036,"13815":0.0042,"13816":0.00354,"13817":0.00427,"13818":0.00345,"13819":0.00346,"13820":0.00394,"13821":0.00445,"13822":0.00425,"13823":0.0035,"13824":0.00432,"13825":0.00358,"13826":0.00428,"13827":0.00419,"13828":0.00486,"13829":0.00353,"13830":0.00353,"13831":0.00338,"13832":0.0044,"13833":0.00426,"13834":0.00341,"13835":0.00402,"13836":0.0034,"13837":0.00358,"13838":0.00353,"13839":0.00422,"13840":0.00416,"13841":0.00414,"13842":0.00357,"13843":0.00411,"13844":0.00342,"13845":0.00377,"13846":0.00347,"13847":0.00348,"13848":0.00414,"13849":0.00412,"13850":0.00434,"13851":0.00429,
"13852":0.0035,"13853":0.00359,"13854":0.00358,"13855":0.0149,"13856":0.00373,"13857":0.00423,"13858":0.00344,"13859":0.00429,"13860":0.00397,"13861":0.00419,"13862":0.00356,"13863":0.00503,"13864":0.0034,"13865":0.00499,"13866":0.0049,"13867":0.00511,"13868":0.00499,"13869":0.00507,"13870":0.0057,"13871":0.00537,"13872":0.00532,"13873":0.00503,"13874":0.0051,"13875":0.00624,"13876":0.00614,"13877":0.00635,"13878":0.00671,"13879":0.0051,"13880":0.00634,"13881":0.00648,"13882":0.00504,"13883":0.0051,"13884":0.0063,"13885":0.00507,"13886":0.00503,"13887":0.00603,"13888":0.00495,"13889":0.00507,"13890":0.0051,"13891":0.00354,"13892":0.00342,"13893":0.00359,"13894":0.00435,"13895":0.00418,"13896":0.00344,"13897":0.00464,"13898":0.00366,"13899":0.00469,"13900":0.00458,"13901":0.00324,"13902":0.00334,"13903":0.0036,"13904":0.00459,"13905":0.00385,"13906":0.00431,"13907":0.00342,"13908":0.00357,"13909":0.00364,"13910":0.00359,"13911":0.00414,"13912":0.00347,"13913":0.00349,"13914":0.00419,"13915":0.00354,"13916":0.00429,"13917":0.00415,"13918":0.0037,"13919":0.00378,"13920":0.00359,"13921":0.00363,"13922":0.00429,"13923":0.0036,"13924":0.00355,"13925":0.00352,"14018":0.00316,"14019":0.00322,"14020":0.0037,"14021":0.00328,"14022":0.00317,"14023":0.00313},"state":{"analyse":"regressionNeedstriage","compares":{}},"lastBuildId":"14023","analyseResult":{"details":{"analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"},"base":13721},"aRange":{"up":10,"upIsValue":false,"down":-10,"downIsValue":false},"indexStart":8,"first":0},"summary":{"lastBuildId":14023,"status":"regression","current":{"average":0.005028235294117648,"ratio":43.88150143073557,"diff":0.0015335294117647062},"base":{"average":0.0034947058823529414}},"averages":[{"start":28,"end":53,"length":26,"average":0.0034947058823529414,"ratio":0,"diff":0,"status":"similar"},{"start":54,"end":150,"length":97,"average":0.0035203508771929825,"ratio":0.7338241243573447,"diff":0.00002564499484004109,"status":"similar"},{"start":151,"end":155,"length":5,"average":0.005012,"ratio":43.41693317623295,"diff":0.0015172941176470585,"status":"regression"},{"start":156,"end":217,"length":62,"average":0.005028235294117648,"ratio":43.88150143073557,"diff":0.0015335294117647062,"status":"regression"}]}} \ No newline at end of file diff --git a/dana/configs/db/Inference/series/pytorch_be/rt_1_forwa/rd_peak_me/mory_MB_.json b/dana/configs/db/Inference/series/pytorch_be/rt_1_forwa/rd_peak_me/mory_MB_.json deleted file mode 100644 index 7541ec8c7b026599637c13bc17ae533257c7f74a..0000000000000000000000000000000000000000 --- a/dana/configs/db/Inference/series/pytorch_be/rt_1_forwa/rd_peak_me/mory_MB_.json +++ /dev/null @@ -1 +0,0 @@ 
-{"projectId":"Inference","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"},"base":13880},"assignee":{"compares":{}},"samples":{"13713":467.69356799999997,"13714":468.283392,"13715":467.73862399999996,"13716":467.726336,"13717":468.103168,"13718":466.84159999999997,"13719":467.099648,"13720":467.431424,"13721":468.267008,"13722":468.98380799999995,"13723":468.08678399999997,"13724":467.73452799999995,"13725":467.94752,"13726":468.6848,"13727":467.980288,"13728":468.25062399999996,"13729":467.49696,"13730":470.102016,"13731":467.202048,"13732":468.00896,"13733":467.828736,"13734":468.279296,"13735":468.64383999999995,"13736":467.91475199999996,"13737":469.315584,"13738":468.19737599999996,"13739":467.96390399999996,"13740":468.103168,"13741":468.09087999999997,"13742":468.14822399999997,"13743":468.29977599999995,"13744":467.98848,"13745":467.97209599999996,"13746":468.279296,"13747":468.508672,"13748":468.393984,"13749":468.013056,"13750":468.226048,"13752":467.90246399999995,"13753":467.74271999999996,"13754":467.62803199999996,"13755":468.832256,"13756":468.410368,"13757":468.066304,"13758":468.13183999999995,"13759":468.045824,"13760":468.041728,"13761":467.27987199999995,"13762":467.53792,"13763":467.50924799999996,"13764":468.209664,"13765":466.845696,"13766":467.144704,"13767":468.635648,"13768":467.00134399999996,"13769":467.68947199999997,"13770":467.67718399999995,"13771":468.160512,"13772":468.127744,"13773":467.57068799999996,"13774":467.56249599999995,"13775":467.697664,"13776":468.496384,"13777":467.705856,"13778":468.31615999999997,"13779":467.96799999999996,"13780":468.0704,"13781":468.24652799999996,"13782":467.828736,"13783":468.63974399999995,"13784":468.262912,"13785":468.557824,"13786":468.15232,"13787":467.943424,"13788":468.455424,"13789":468.59468799999996,"13790":468.81587199999996,"13791":468.402176,"13792":467.96390399999996,"13793":467.61983999999995,"13794":468.31615999999997,"13795":467.525632,"13796":467.94752,"13797":468.41855999999996,"13798":468.3776,"13799":468.340736,"13800":467.80415999999997,"13801":468.774912,"13802":467.890176,"13803":468.66432,"13804":468.230144,"13805":467.67718399999995,"13806":468.14412799999997,"13807":469.241856,"13808":468.115456,"13809":468.47180799999995,"13810":467.97209599999996,"13811":467.582976,"13812":469.51219199999997,"13813":468.23424,"13814":467.92704,"13815":468.127744,"13816":467.73043199999995,"13817":467.8656,"13818":467.021824,"13819":468.0704,"13820":467.304448,"13821":468.14412799999997,"13822":468.45952,"13823":467.90246399999995,"13824":467.533824,"13825":467.996672,"13826":467.68127999999996,"13827":468.058112,"13828":467.47648,"13829":468.045824,"13830":467.40275199999996,"13831":468.13183999999995,"13832":467.50924799999996,"13833":467.64032,"13834":468.037632,"13835":467.62393599999996,"13836":468.013056,"13837":467.63212799999997,"13838":467.80006399999996,"13839":467.996672,"13840":468.13593599999996,"13841":467.79187199999996,"13842":468.402176,"13843":467.894272,"13844":467.91884799999997,"13845":467.869696,"13846":467.62803199999996,"13847":468.29568,"13848":467.755008,"13849":468.000768,"13850":467.92704,"13851":467.820544,"13852":468.52915199999995,"13853":467.939328,"13854":467.714048,"13855":467.18976,"13856":468.615168,"13857":467.992576,"13858":467.836928,"13859":467.443712,"13860":467.550208,"13861":468.03353599999997,"13862":467.80006399999996,"13863":468.226048,"13864":467.431424,"13865":469.46304,"13866":468.99609599999997,"13867":469.55315199999995,"13868":469.83168,"1
3869":469.143552,"13870":469.36064,"13871":469.83168,"13872":470.15936,"13873":469.929984,"13874":469.27871999999996,"13875":469.078016,"13876":470.02419199999997,"13877":469.62278399999997,"13878":469.635072,"13879":470.18803199999996,"13880":469.87264,"13881":469.16403199999996,"13882":469.983232,"13883":469.50809599999997,"13884":469.61459199999996,"13885":469.21727999999996,"13886":469.655552,"13887":469.704704,"13888":469.864448,"13889":469.630976,"13890":469.884928,"13891":468.11136,"13892":468.64793599999996,"13893":468.08678399999997,"13894":467.63212799999997,"13895":467.79187199999996,"13896":468.975616,"13897":467.50515199999995,"13898":468.74624,"13899":467.836928,"13900":467.550208,"13901":467.96390399999996,"13902":467.41504,"13903":467.197952,"13904":467.714048,"13905":467.78777599999995,"13906":467.3536,"13907":467.984384,"13908":467.873792,"13909":467.82464,"13910":467.656704,"13911":467.85331199999996,"13912":467.63622399999997,"13913":467.70176,"13914":467.91884799999997,"13915":467.894272,"13916":467.44780799999995,"13917":468.23424,"13918":467.3536,"13919":467.652608,"13920":467.984384,"13921":467.97619199999997,"13922":467.61983999999995,"13923":467.68537599999996,"13924":468.53324799999996,"13925":467.45599999999996,"14018":551,"14019":551,"14020":551,"14021":551,"14022":551,"14023":551},"state":{"analyse":"regressionNeedstriage","compares":{}},"lastBuildId":"14023","analyseResult":{"details":{"analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"},"base":13880},"aRange":{"up":10,"upIsValue":false,"down":-10,"downIsValue":false},"indexStart":166,"first":0},"summary":{"lastBuildId":14023,"status":"regression","current":{"average":551,"ratio":17.66560507389197,"diff":82.72382052173913},"base":{"average":468.2761794782609}},"averages":[{"start":166,"end":211,"length":46,"average":468.2761794782609,"ratio":0,"diff":0,"status":"similar"},{"start":212,"end":217,"length":6,"average":551,"ratio":17.66560507389197,"diff":82.72382052173913,"status":"regression"}]}} \ No newline at end of file diff --git a/dana/configs/db/Inference/series/pytorch_be/rt_1_forwa/rd_through/put_sample/s_s_.json b/dana/configs/db/Inference/series/pytorch_be/rt_1_forwa/rd_through/put_sample/s_s_.json deleted file mode 100644 index 1c40208a3c37169c76d04c12d82fb4d97e933f76..0000000000000000000000000000000000000000 --- a/dana/configs/db/Inference/series/pytorch_be/rt_1_forwa/rd_through/put_sample/s_s_.json +++ /dev/null @@ -1 +0,0 @@ 
-{"projectId":"Inference","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"},"base":13721},"assignee":{"compares":{}},"samples":{"13713":1170,"13714":1060,"13715":1170,"13716":1130,"13717":1060,"13718":1130,"13719":924,"13720":976,"13721":1100,"13722":1130,"13723":1170,"13724":935,"13725":1110,"13726":969,"13727":1150,"13728":939,"13729":1150,"13730":1180,"13731":1060,"13732":1180,"13733":937,"13734":1180,"13735":971,"13736":962,"13737":1190,"13738":1120,"13739":978,"13740":948,"13741":1160,"13742":1120,"13743":1180,"13744":1150,"13745":1150,"13746":1140,"13747":920,"13748":930,"13749":946,"13750":976,"13752":1160,"13753":1130,"13754":1130,"13755":1150,"13756":1150,"13757":920,"13758":1140,"13759":1170,"13760":943,"13761":1180,"13762":1150,"13763":1160,"13764":930,"13765":941,"13766":1080,"13767":786,"13768":1020,"13769":1080,"13770":1120,"13771":1130,"13772":1150,"13773":932,"13774":1120,"13775":1170,"13776":1160,"13777":1130,"13778":917,"13779":948,"13780":1170,"13781":1180,"13782":955,"13783":948,"13784":1170,"13785":1180,"13786":924,"13787":976,"13788":1160,"13789":1170,"13790":922,"13791":1110,"13792":1160,"13793":1140,"13794":1190,"13795":887,"13796":1130,"13797":1120,"13798":1120,"13799":926,"13800":1160,"13801":1150,"13802":1110,"13803":897,"13804":969,"13805":1110,"13806":1050,"13807":1010,"13808":1120,"13809":821,"13810":1110,"13811":1180,"13812":1160,"13813":1150,"13814":1110,"13815":952,"13816":1130,"13817":937,"13818":1160,"13819":1160,"13820":1020,"13821":899,"13822":941,"13823":1140,"13824":926,"13825":1120,"13826":935,"13827":955,"13828":823,"13829":1130,"13830":1130,"13831":1180,"13832":909,"13833":939,"13834":1170,"13835":995,"13836":1180,"13837":1120,"13838":1130,"13839":948,"13840":962,"13841":966,"13842":1120,"13843":973,"13844":1170,"13845":1060,"13846":1150,"13847":1150,"13848":966,"13849":971,"13850":922,"13851":932,"13852":1140,"13853":1110,"13854":1120,"13855":268,"13856":1070,"13857":946,"13858":1160,"13859":932,"13860":1010,"13861":955,"13862":1120,"13863":795,"13864":1180,"13865":802,"13866":816,"13867":783,"13868":802,"13869":789,"13870":702,"13871":745,"13872":752,"13873":795,"13874":784,"13875":641,"13876":651,"13877":630,"13878":596,"13879":784,"13880":631,"13881":617,"13882":794,"13883":784,"13884":635,"13885":789,"13886":795,"13887":663,"13888":808,"13889":789,"13890":784,"13891":1130,"13892":1170,"13893":1110,"13894":920,"13895":957,"13896":1160,"13897":862,"13898":1090,"13899":853,"13900":873,"13901":1230,"13902":1200,"13903":1110,"13904":871,"13905":1040,"13906":928,"13907":1170,"13908":1120,"13909":1100,"13910":1110,"13911":966,"13912":1150,"13913":1150,"13914":955,"13915":1130,"13916":932,"13917":964,"13918":1080,"13919":1060,"13920":1110,"13921":1100,"13922":932,"13923":1110,"13924":1130,"13925":1140,"14018":316,"14019":311,"14020":270,"14021":305,"14022":315,"14023":319},"state":{"analyse":"improvementNeedstriage","compares":{}},"lastBuildId":"14023","analyseResult":{"details":{"analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"},"base":13721},"aRange":{"up":10,"upIsValue":false,"down":-10,"downIsValue":false},"indexStart":8,"first":0},"summary":{"lastBuildId":14023,"status":"improvement","current":{"average":782.7142857142857,"ratio":-31.763369963369964,"diff":-364.34453781512605},"base":{"average":1147.0588235294117}},"averages":[{"start":28,"end":53,"length":26,"average":1147.0588235294117,"ratio":0,"diff":0,"status":"similar"},{"start":54,"end":150,"length":97,"average":1136.6666666666667,"ratio":-0.9059829059
828948,"diff":-10.392156862744969,"status":"similar"},{"start":151,"end":155,"length":5,"average":798.4,"ratio":-30.395897435897435,"diff":-348.65882352941173,"status":"improvement"},{"start":156,"end":217,"length":62,"average":782.7142857142857,"ratio":-31.763369963369964,"diff":-364.34453781512605,"status":"improvement"}]}} \ No newline at end of file diff --git a/dana/configs/db/Inference/series/pytorch_gp/t2_0_forwa/rd_latency/_s_.json b/dana/configs/db/Inference/series/pytorch_gp/t2_0_forwa/rd_latency/_s_.json deleted file mode 100644 index 2a9e460a3ae7b7ed8b812b2da2ebbaee4b73f02f..0000000000000000000000000000000000000000 --- a/dana/configs/db/Inference/series/pytorch_gp/t2_0_forwa/rd_latency/_s_.json +++ /dev/null @@ -1 +0,0 @@ -{"projectId":"Inference","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"13713":0.00381,"13714":0.0037,"13715":0.00338,"13716":0.00361,"13717":0.0031,"13718":0.00394,"13719":0.00376,"13720":0.00378,"13721":0.00315,"13722":0.0033,"13723":0.00375,"13724":0.00344,"13725":0.00383,"13726":0.00315,"13727":0.00382,"13728":0.00404,"13729":0.00315,"13730":0.00372,"13731":0.00378,"13732":0.00373,"13733":0.00371,"13734":0.00335,"13735":0.00306,"13736":0.00372,"13737":0.00452,"13738":0.0038,"13739":0.00379,"13740":0.00382,"13741":0.00373,"13742":0.00382,"13743":0.00382,"13744":0.00315,"13745":0.00305,"13746":0.00312,"13747":0.00336,"13748":0.00308,"13749":0.00386,"13750":0.00353,"13752":0.00375,"13753":0.00387,"13754":0.00387,"13755":0.00381,"13756":0.0038,"13757":0.00342,"13758":0.00375,"13759":0.0037,"13760":0.00384,"13761":0.00334,"13762":0.00374,"13763":0.00375,"13764":0.00397,"13765":0.00386,"13766":0.00347,"13767":0.00304,"13768":0.00317,"13769":0.00431,"13770":0.00318,"13771":0.00382,"13772":0.00381,"13773":0.00343,"13774":0.00306,"13775":0.00306,"13776":0.00372,"13777":0.00383,"13778":0.00335,"13779":0.00387,"13780":0.00391,"13781":0.00385,"13782":0.00383,"13783":0.0031,"13784":0.00382,"13785":0.00387,"13786":0.00341,"13787":0.00374,"13788":0.00375,"13789":0.00376,"13790":0.00303,"13791":0.00381,"13792":0.00449,"13793":0.00383,"13794":0.00399,"13795":0.00303,"13796":0.00379,"13797":0.00386,"13798":0.00375,"13799":0.00451,"13800":0.00394,"13801":0.00382,"13802":0.00348,"13803":0.00326,"13804":0.00388,"13805":0.00444,"13806":0.00343,"13807":0.00503,"13808":0.00355,"13809":0.00394,"13810":0.00384,"13811":0.00385,"13812":0.00391,"13813":0.0033,"13814":0.00381,"13815":0.00379,"13816":0.00377,"13817":0.00382,"13818":0.00387,"13819":0.00309,"13820":0.00379,"13821":0.00371,"13822":0.00316,"13823":0.00393,"13824":0.00334,"13825":0.00393,"13826":0.0038,"13827":0.00367,"13828":0.00411,"13829":0.00319,"13830":0.00384,"13831":0.00374,"13832":0.00389,"13833":0.00383,"13834":0.0038,"13835":0.00309,"13836":0.00378,"13837":0.00396,"13838":0.00345,"13839":0.00307,"13840":0.00391,"13841":0.00383,"13842":0.00311,"13843":0.00327,"13844":0.0034,"13845":0.00311,"13846":0.00383,"13847":0.00386,"13848":0.0038,"13849":0.00391,"13850":0.00323,"13851":0.00376,"13852":0.00312,"13853":0.00335,"13854":0.0038,"13855":0.00508,"13856":0.0033,"13857":0.00384,"13858":0.00316,"13859":0.00325,"13860":0.00312,"13861":0.00307,"13862":0.00397,"13863":0.00387,"13864":0.00408,"13865":0.00448,"13866":0.00372,"13867":0.0044,"13868":0.00322,"13869":0.00382,"13870":0.00403,"13871":0.00472,"13872":0.00355,"13873":0.00451,"13874":0.00378,"13875":0.00374,"13876":0.00369,"13877":0.00382,"13878":0.00501,"13879":0.00373,"13880":0.00376,"13881":0.00381,"13
882":0.00389,"13883":0.00463,"13884":0.00312,"13885":0.00325,"13886":0.0039,"13887":0.00391,"13888":0.00342,"13889":0.00387,"13890":0.00402,"13891":0.00389,"13892":0.00374,"13893":0.00376,"13894":0.00386,"13895":0.00313,"13896":0.00389,"13897":0.00341,"13898":0.00331,"13899":0.00343,"13900":0.00327,"13901":0.00345,"13902":0.00343,"13903":0.00327,"13904":0.00349,"13905":0.00389,"13906":0.00388,"13907":0.00379,"13908":0.00387,"13909":0.004,"13910":0.00341,"13911":0.00383,"13912":0.00387,"13913":0.00397,"13914":0.00317,"13915":0.00324,"13916":0.00314,"13917":0.00394,"13918":0.0032,"13919":0.00381,"13920":0.00364,"13921":0.00417,"13922":0.00379,"13923":0.00335,"13924":0.00318,"13925":0.00312,"14018":0.00324,"14019":0.00324,"14020":0.00344,"14021":0.00339,"14022":0.00324,"14023":0.00319},"state":{"analyse":"improvementNeedstriage","compares":{}},"lastBuildId":"14023","analyseResult":{"details":{"analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"aRange":{"up":10,"upIsValue":false,"down":-10,"downIsValue":false},"indexStart":0,"first":0},"summary":{"lastBuildId":14023,"status":"improvement","current":{"average":0.0032655555555555554,"ratio":-13.988879133743058,"diff":-0.0005311111111111115},"base":{"average":0.003796666666666667}},"averages":[{"start":25,"end":30,"length":6,"average":0.003796666666666667,"ratio":0,"diff":0,"status":"similar"},{"start":31,"end":35,"length":5,"average":0.003152,"ratio":-16.97980684811239,"diff":-0.0006446666666666671,"status":"improvement"},{"start":36,"end":182,"length":147,"average":0.0037963366336633657,"ratio":-0.008692704213378108,"diff":-3.300330033012555e-7,"status":"similar"},{"start":183,"end":190,"length":8,"average":0.0033824999999999997,"ratio":-10.908691834942948,"diff":-0.0004141666666666673,"status":"improvement"},{"start":191,"end":208,"length":18,"average":0.003880769230769231,"ratio":2.21516850138448,"diff":0.0000841025641025641,"status":"similar"},{"start":209,"end":217,"length":9,"average":0.0032655555555555554,"ratio":-13.988879133743058,"diff":-0.0005311111111111115,"status":"improvement"}]}} \ No newline at end of file diff --git a/dana/configs/db/Inference/series/pytorch_gp/t2_0_forwa/rd_peak_me/mory_MB_.json b/dana/configs/db/Inference/series/pytorch_gp/t2_0_forwa/rd_peak_me/mory_MB_.json deleted file mode 100644 index bca9a1893dca5e15435b84a0625dc5e42aabaf13..0000000000000000000000000000000000000000 --- a/dana/configs/db/Inference/series/pytorch_gp/t2_0_forwa/rd_peak_me/mory_MB_.json +++ /dev/null @@ -1 +0,0 @@ 
-{"projectId":"Inference","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"},"base":13880},"assignee":{"compares":{}},"samples":{"13713":469.082112,"13714":468.676608,"13715":469.00019199999997,"13716":469.15993599999996,"13717":468.82815999999997,"13718":468.893696,"13719":469.26643199999995,"13720":469.31968,"13721":468.82406399999996,"13722":468.87731199999996,"13723":469.004288,"13724":469.04115199999995,"13725":469.250048,"13726":469.1968,"13727":469.27462399999996,"13728":469.254144,"13729":468.688896,"13730":469.17632,"13731":469.250048,"13732":469.004288,"13733":469.250048,"13734":469.05753599999997,"13735":468.844544,"13736":469.27462399999996,"13737":468.81587199999996,"13738":468.783104,"13739":469.311488,"13740":469.26643199999995,"13741":468.75443199999995,"13742":469.26643199999995,"13743":468.795392,"13744":468.836352,"13745":469.13536,"13746":469.25824,"13747":469.250048,"13748":468.98790399999996,"13749":469.42208,"13750":468.688896,"13752":469.037056,"13753":468.8896,"13754":468.8896,"13755":469.04934399999996,"13756":469.26643199999995,"13757":469.09439999999995,"13758":469.25824,"13759":469.082112,"13760":468.774912,"13761":469.09849599999995,"13762":468.8896,"13763":469.016576,"13764":469.44255999999996,"13765":469.22547199999997,"13766":469.471232,"13767":469.05753599999997,"13768":469.04934399999996,"13769":469.573632,"13770":469.061632,"13771":469.09439999999995,"13772":469.32377599999995,"13773":469.03296,"13774":469.10259199999996,"13775":469.008384,"13776":469.479424,"13777":469.33196799999996,"13778":469.10668799999996,"13779":469.123072,"13780":468.668416,"13781":469.352448,"13782":469.487616,"13783":468.93465599999996,"13784":468.836352,"13785":469.086208,"13786":469.311488,"13787":469.46304,"13788":469.184512,"13789":468.963328,"13790":468.979712,"13791":469.184512,"13792":469.086208,"13793":468.692992,"13794":468.76671999999996,"13795":468.897792,"13796":469.10259199999996,"13797":468.86911999999995,"13798":468.88550399999997,"13799":469.1968,"13800":469.21318399999996,"13801":469.372928,"13802":469.143552,"13803":469.67193599999996,"13804":469.143552,"13805":469.417984,"13806":468.959232,"13807":469.44255999999996,"13808":469.01248,"13809":469.086208,"13810":469.45075199999997,"13811":469.069824,"13812":469.2992,"13813":469.05343999999997,"13814":469.028864,"13815":469.36064,"13816":469.09439999999995,"13817":469.311488,"13818":469.090304,"13819":469.635072,"13820":469.573632,"13821":469.38521599999996,"13822":469.344256,"13823":469.532672,"13824":469.286912,"13825":469.204992,"13826":469.467136,"13827":469.38111999999995,"13828":469.774336,"13829":469.38931199999996,"13830":469.09849599999995,"13831":469.643264,"13832":469.72108799999995,"13833":469.34015999999997,"13834":469.27462399999996,"13835":469.44255999999996,"13836":469.123072,"13837":469.43846399999995,"13838":469.639168,"13839":469.45484799999997,"13840":469.21318399999996,"13841":469.372928,"13842":469.123072,"13843":469.04115199999995,"13844":469.532672,"13845":469.64736,"13846":469.291008,"13847":469.061632,"13848":469.25824,"13849":469.13536,"13850":469.262336,"13851":469.426176,"13852":469.245952,"13853":469.16403199999996,"13854":469.520384,"13855":469.15174399999995,"13856":469.356544,"13857":469.303296,"13858":469.48352,"13859":469.458944,"13860":469.458944,"13861":469.184512,"13862":469.33196799999996,"13863":469.84806399999997,"13864":469.61459199999996,"13865":469.303296,"13866":469.233664,"13867":468.86911999999995,"13868":469.712896,"13869":469.72108799999995,"1387
0":469.54496,"13871":469.286912,"13872":469.262336,"13873":469.536768,"13874":469.33196799999996,"13875":469.7088,"13876":469.27462399999996,"13877":469.143552,"13878":469.303296,"13879":469.487616,"13880":469.64736,"13881":469.33196799999996,"13882":469.348352,"13883":469.68012799999997,"13884":469.307392,"13885":469.184512,"13886":469.79071999999996,"13887":469.17222399999997,"13888":469.929984,"13889":469.38111999999995,"13890":469.54905599999995,"13891":469.434368,"13892":469.56134399999996,"13893":469.475328,"13894":469.467136,"13895":469.876736,"13896":469.307392,"13897":469.44255999999996,"13898":469.516288,"13899":469.684224,"13900":469.45484799999997,"13901":469.651456,"13902":469.62687999999997,"13903":469.44255999999996,"13904":469.50809599999997,"13905":469.037056,"13906":469.307392,"13907":469.815296,"13908":469.037056,"13909":469.803008,"13910":469.372928,"13911":469.16403199999996,"13912":469.409792,"13913":469.368832,"13914":469.1968,"13915":469.348352,"13916":469.21727999999996,"13917":469.42208,"13918":469.38111999999995,"13919":469.417984,"13920":469.21318399999996,"13921":468.860928,"13922":469.192704,"13923":469.16403199999996,"13924":469.23776,"13925":469.303296,"14018":555,"14019":554,"14020":555,"14021":556,"14022":555,"14023":555},"state":{"analyse":"regressionNeedstriage","compares":{}},"lastBuildId":"14023","analyseResult":{"details":{"analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"},"base":13880},"aRange":{"up":10,"upIsValue":false,"down":-10,"downIsValue":false},"indexStart":166,"first":0},"summary":{"lastBuildId":14023,"status":"regression","current":{"average":555,"ratio":18.23252547688723,"diff":85.58602295652173},"base":{"average":469.41397704347827}},"averages":[{"start":166,"end":211,"length":46,"average":469.41397704347827,"ratio":0,"diff":0,"status":"similar"},{"start":212,"end":217,"length":6,"average":555,"ratio":18.23252547688723,"diff":85.58602295652173,"status":"regression"}]}} \ No newline at end of file diff --git a/dana/configs/db/Inference/series/pytorch_gp/t2_0_forwa/rd_through/put_sample/s_s_.json b/dana/configs/db/Inference/series/pytorch_gp/t2_0_forwa/rd_through/put_sample/s_s_.json deleted file mode 100644 index fbd2ac01569b0d9405649d7b2401ad771fe9bf9a..0000000000000000000000000000000000000000 --- a/dana/configs/db/Inference/series/pytorch_gp/t2_0_forwa/rd_through/put_sample/s_s_.json +++ /dev/null @@ -1 +0,0 @@ 
-{"projectId":"Inference","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"13713":262,"13714":270,"13715":296,"13716":277,"13717":323,"13718":254,"13719":266,"13720":265,"13721":317,"13722":303,"13723":267,"13724":291,"13725":261,"13726":317,"13727":262,"13728":248,"13729":317,"13730":269,"13731":265,"13732":268,"13733":270,"13734":299,"13735":327,"13736":269,"13737":221,"13738":263,"13739":264,"13740":262,"13741":268,"13742":262,"13743":262,"13744":317,"13745":328,"13746":321,"13747":298,"13748":325,"13749":259,"13750":283,"13752":267,"13753":258,"13754":258,"13755":262,"13756":263,"13757":292,"13758":267,"13759":270,"13760":260,"13761":299,"13762":267,"13763":267,"13764":252,"13765":259,"13766":288,"13767":329,"13768":315,"13769":232,"13770":314,"13771":262,"13772":262,"13773":292,"13774":327,"13775":327,"13776":269,"13777":261,"13778":299,"13779":258,"13780":256,"13781":260,"13782":261,"13783":323,"13784":262,"13785":258,"13786":293,"13787":267,"13788":267,"13789":266,"13790":330,"13791":262,"13792":223,"13793":261,"13794":251,"13795":330,"13796":264,"13797":259,"13798":267,"13799":222,"13800":254,"13801":262,"13802":287,"13803":307,"13804":258,"13805":225,"13806":292,"13807":199,"13808":282,"13809":254,"13810":260,"13811":260,"13812":256,"13813":303,"13814":262,"13815":264,"13816":265,"13817":262,"13818":258,"13819":324,"13820":264,"13821":270,"13822":316,"13823":254,"13824":299,"13825":254,"13826":263,"13827":272,"13828":243,"13829":313,"13830":260,"13831":267,"13832":257,"13833":261,"13834":263,"13835":324,"13836":265,"13837":253,"13838":290,"13839":326,"13840":256,"13841":261,"13842":322,"13843":306,"13844":294,"13845":322,"13846":261,"13847":259,"13848":263,"13849":256,"13850":310,"13851":266,"13852":321,"13853":299,"13854":263,"13855":197,"13856":303,"13857":260,"13858":316,"13859":308,"13860":321,"13861":326,"13862":252,"13863":258,"13864":245,"13865":223,"13866":269,"13867":227,"13868":311,"13869":262,"13870":248,"13871":212,"13872":282,"13873":222,"13874":265,"13875":267,"13876":271,"13877":262,"13878":200,"13879":268,"13880":266,"13881":262,"13882":257,"13883":216,"13884":321,"13885":308,"13886":256,"13887":256,"13888":292,"13889":258,"13890":249,"13891":257,"13892":267,"13893":266,"13894":259,"13895":319,"13896":257,"13897":293,"13898":302,"13899":292,"13900":306,"13901":290,"13902":292,"13903":306,"13904":287,"13905":257,"13906":258,"13907":264,"13908":258,"13909":250,"13910":293,"13911":261,"13912":258,"13913":252,"13914":315,"13915":309,"13916":318,"13917":254,"13918":312,"13919":262,"13920":275,"13921":240,"13922":264,"13923":299,"13924":314,"13925":321,"14018":617,"14019":617,"14020":581,"14021":590,"14022":617,"14023":627},"state":{"analyse":"regressionNeedstriage","compares":{}},"lastBuildId":"14023","analyseResult":{"details":{"analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"aRange":{"up":10,"upIsValue":false,"down":-10,"downIsValue":false},"indexStart":0,"first":0},"summary":{"lastBuildId":14023,"status":"regression","current":{"average":608.1666666666666,"ratio":130.8032890575585,"diff":344.66666666666663},"base":{"average":263.5}},"averages":[{"start":25,"end":30,"length":6,"average":263.5,"ratio":0,"diff":0,"status":"similar"},{"start":31,"end":35,"length":5,"average":317.8,"ratio":20.60721062618596,"diff":54.30000000000001,"status":"regression"},{"start":36,"end":42,"length":7,"average":264.2857142857143,"ratio":0.2981837896448871,"diff":0.7857142857142776,"status":"similar"},{"sta
rt":43,"end":182,"length":140,"average":263.77659574468083,"ratio":0.10496992208001249,"diff":0.2765957446808329,"status":"similar"},{"start":183,"end":190,"length":8,"average":296,"ratio":12.333965844402277,"diff":32.5,"status":"regression"},{"start":191,"end":211,"length":21,"average":257.9230769230769,"ratio":-2.1164793460808706,"diff":-5.576923076923094,"status":"similar"},{"start":212,"end":217,"length":6,"average":608.1666666666666,"ratio":130.8032890575585,"diff":344.66666666666663,"status":"regression"}]}} \ No newline at end of file diff --git a/dana/configs/db/Inference/series/pytorch_gp/t2_0_gener/ate_latenc/y_s_.json b/dana/configs/db/Inference/series/pytorch_gp/t2_0_gener/ate_latenc/y_s_.json deleted file mode 100644 index fb5f19b78f605da3ba1c684f6dde667f0629d4a6..0000000000000000000000000000000000000000 --- a/dana/configs/db/Inference/series/pytorch_gp/t2_0_gener/ate_latenc/y_s_.json +++ /dev/null @@ -1 +0,0 @@ -{"projectId":"Inference","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"13713":0.502,"13714":0.586,"13715":0.481,"13716":0.579,"13717":0.487,"13718":0.491,"13719":0.519,"13720":0.484,"13721":0.485,"13722":0.482,"13723":0.567,"13724":0.479,"13725":0.513,"13726":0.483,"13727":0.482,"13728":0.48,"13729":0.482,"13730":0.497,"13731":0.511,"13732":0.519,"13733":0.504,"13734":0.482,"13735":0.481,"13736":0.563,"13737":0.494,"13738":0.532,"13739":0.563,"13740":0.479,"13741":0.487,"13742":0.526,"13743":0.486,"13744":0.481,"13745":0.484,"13746":0.481,"13747":0.48,"13748":0.479,"13749":0.486,"13750":0.484,"13752":0.571,"13753":0.505,"13754":0.482,"13755":0.543,"13756":0.502,"13757":0.48,"13758":0.498,"13759":0.493,"13760":0.54,"13761":0.493,"13762":0.526,"13763":0.517,"13764":0.529,"13765":0.496,"13766":0.493,"13767":0.486,"13768":0.485,"13769":0.515,"13770":0.494,"13771":0.51,"13772":0.509,"13773":0.484,"13774":0.488,"13775":0.509,"13776":0.544,"13777":0.533,"13778":0.483,"13779":0.494,"13780":0.531,"13781":0.52,"13782":0.527,"13783":0.481,"13784":0.505,"13785":0.56,"13786":0.482,"13787":0.517,"13788":0.533,"13789":0.481,"13790":0.485,"13791":0.563,"13792":0.495,"13793":0.497,"13794":0.588,"13795":0.485,"13796":0.543,"13797":0.511,"13798":0.525,"13799":0.528,"13800":0.492,"13801":0.507,"13802":0.514,"13803":0.757,"13804":0.599,"13805":0.519,"13806":1.01,"13807":0.488,"13808":0.482,"13809":0.529,"13810":0.524,"13811":0.488,"13812":0.515,"13813":0.483,"13814":0.486,"13815":0.493,"13816":0.513,"13817":0.486,"13818":0.513,"13819":0.479,"13820":0.533,"13821":0.49,"13822":0.48,"13823":0.486,"13824":0.481,"13825":0.518,"13826":0.519,"13827":0.494,"13828":0.541,"13829":0.482,"13830":0.557,"13831":0.486,"13832":0.583,"13833":0.55,"13834":0.519,"13835":0.488,"13836":0.508,"13837":0.502,"13838":0.483,"13839":0.482,"13840":0.533,"13841":0.48,"13842":0.481,"13843":0.48,"13844":0.481,"13845":0.488,"13846":0.492,"13847":0.516,"13848":0.56,"13849":0.5,"13850":0.491,"13851":0.489,"13852":0.536,"13853":0.485,"13854":0.492,"13855":11.2,"13856":0.506,"13857":0.497,"13858":0.484,"13859":0.492,"13860":0.485,"13861":0.536,"13862":0.492,"13863":0.548,"13864":0.501,"13865":1.08,"13866":0.493,"13867":0.513,"13868":0.494,"13869":0.538,"13870":0.519,"13871":0.515,"13872":0.503,"13873":0.529,"13874":0.544,"13875":0.503,"13876":0.556,"13877":0.496,"13878":0.494,"13879":0.523,"13880":0.509,"13881":0.541,"13882":0.497,"13883":0.493,"13884":0.488,"13885":0.491,"13886":0.505,"13887":0.527,"13888":0.49,"13889":0.512,"13890":0.537,"13891":0.483,"13892":
0.528,"13893":0.494,"13894":0.503,"13895":0.482,"13896":0.586,"13897":0.581,"13898":0.484,"13899":0.514,"13900":0.495,"13901":0.538,"13902":0.588,"13903":0.568,"13904":0.482,"13905":0.508,"13906":0.511,"13907":0.54,"13908":0.55,"13909":0.525,"13910":0.486,"13911":0.512,"13912":0.487,"13913":0.502,"13914":0.522,"13915":0.485,"13916":0.485,"13917":0.532,"13918":0.488,"13919":0.49,"13920":0.497,"13921":0.481,"13922":0.504,"13923":0.492,"13924":0.49,"13925":0.497,"14018":0.307,"14019":0.3,"14020":0.306,"14021":0.299,"14022":0.311,"14023":0.299},"state":{"analyse":"improvementNeedstriage","compares":{}},"lastBuildId":"14023","analyseResult":{"details":{"analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"aRange":{"up":10,"upIsValue":false,"down":-10,"downIsValue":false},"indexStart":0,"first":0},"summary":{"lastBuildId":14023,"status":"improvement","current":{"average":0.30366666666666664,"ratio":-38.244900474517785,"diff":-0.1880606060606061},"base":{"average":0.49172727272727274}},"averages":[{"start":4,"end":40,"length":37,"average":0.49172727272727274,"ratio":0,"diff":0,"status":"similar"},{"start":41,"end":211,"length":171,"average":0.5043161290322578,"ratio":2.560129771766227,"diff":0.012588856304985019,"status":"similar"},{"start":212,"end":217,"length":6,"average":0.30366666666666664,"ratio":-38.244900474517785,"diff":-0.1880606060606061,"status":"improvement"}]}} \ No newline at end of file diff --git a/dana/configs/db/Inference/series/pytorch_gp/t2_0_gener/ate_peak_m/emory_MB_.json b/dana/configs/db/Inference/series/pytorch_gp/t2_0_gener/ate_peak_m/emory_MB_.json deleted file mode 100644 index 8e446b9da25a8e855766256fa2dbdeba2ed7f9d6..0000000000000000000000000000000000000000 --- a/dana/configs/db/Inference/series/pytorch_gp/t2_0_gener/ate_peak_m/emory_MB_.json +++ /dev/null @@ -1 +0,0 @@ -{"projectId":"Inference","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"14018":558,"14019":560,"14020":559,"14021":561,"14022":558,"14023":560},"state":{"analyse":"similarNeedstriage","compares":{}},"lastBuildId":"14023","analyseResult":{"details":{"analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"aRange":{"up":10,"upIsValue":false,"down":-10,"downIsValue":false},"indexStart":0,"first":0},"summary":{"lastBuildId":14023,"status":"similar","current":{"average":559.3333333333334,"ratio":0,"diff":0},"base":{"average":559.3333333333334}},"averages":[{"start":0,"end":5,"length":6,"average":559.3333333333334,"ratio":0,"diff":0,"status":"similar"}]}} \ No newline at end of file diff --git a/dana/configs/db/Inference/series/pytorch_gp/t2_0_gener/ate_throug/hput_token/s_s_.json b/dana/configs/db/Inference/series/pytorch_gp/t2_0_gener/ate_throug/hput_token/s_s_.json deleted file mode 100644 index c5b7a0ed5f16e19da92e58601a6d7276393106ce..0000000000000000000000000000000000000000 --- a/dana/configs/db/Inference/series/pytorch_gp/t2_0_gener/ate_throug/hput_token/s_s_.json +++ /dev/null @@ -1 +0,0 @@ 
-{"projectId":"Inference","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"},"base":13732},"assignee":{"compares":{}},"samples":{"13713":199,"13714":171,"13715":208,"13716":173,"13717":205,"13718":204,"13719":193,"13720":207,"13721":206,"13722":207,"13723":176,"13724":209,"13725":195,"13726":207,"13727":207,"13728":208,"13729":207,"13730":201,"13731":196,"13732":193,"13733":198,"13734":207,"13735":208,"13736":178,"13737":202,"13738":188,"13739":178,"13740":209,"13741":205,"13742":190,"13743":206,"13744":208,"13745":207,"13746":208,"13747":208,"13748":209,"13749":206,"13750":207,"13752":175,"13753":198,"13754":207,"13755":184,"13756":199,"13757":208,"13758":201,"13759":203,"13760":185,"13761":203,"13762":190,"13763":193,"13764":189,"13765":202,"13766":203,"13767":206,"13768":206,"13769":194,"13770":202,"13771":196,"13772":196,"13773":207,"13774":205,"13775":196,"13776":184,"13777":188,"13778":207,"13779":202,"13780":188,"13781":192,"13782":190,"13783":208,"13784":198,"13785":179,"13786":207,"13787":193,"13788":188,"13789":208,"13790":206,"13791":178,"13792":202,"13793":201,"13794":170,"13795":206,"13796":184,"13797":196,"13798":190,"13799":189,"13800":203,"13801":197,"13802":195,"13803":132,"13804":167,"13805":193,"13806":99,"13807":205,"13808":207,"13809":189,"13810":191,"13811":205,"13812":194,"13813":207,"13814":206,"13815":203,"13816":195,"13817":206,"13818":195,"13819":209,"13820":188,"13821":204,"13822":208,"13823":206,"13824":208,"13825":193,"13826":193,"13827":202,"13828":185,"13829":207,"13830":180,"13831":206,"13832":172,"13833":182,"13834":193,"13835":205,"13836":197,"13837":199,"13838":207,"13839":207,"13840":188,"13841":208,"13842":208,"13843":208,"13844":208,"13845":205,"13846":203,"13847":194,"13848":179,"13849":200,"13850":204,"13851":204,"13852":187,"13853":206,"13854":203,"13855":8.93,"13856":198,"13857":201,"13858":207,"13859":203,"13860":206,"13861":187,"13862":203,"13863":182,"13864":200,"13865":92.6,"13866":203,"13867":195,"13868":202,"13869":186,"13870":193,"13871":194,"13872":199,"13873":189,"13874":184,"13875":199,"13876":180,"13877":202,"13878":202,"13879":191,"13880":196,"13881":185,"13882":201,"13883":203,"13884":205,"13885":204,"13886":198,"13887":190,"13888":204,"13889":195,"13890":186,"13891":207,"13892":189,"13893":202,"13894":199,"13895":207,"13896":171,"13897":172,"13898":207,"13899":195,"13900":202,"13901":186,"13902":170,"13903":176,"13904":207,"13905":197,"13906":196,"13907":185,"13908":182,"13909":190,"13910":206,"13911":195,"13912":205,"13913":199,"13914":192,"13915":206,"13916":206,"13917":188,"13918":205,"13919":204,"13920":201,"13921":208,"13922":198,"13923":203,"13924":204,"13925":201,"14018":651,"14019":667,"14020":654,"14021":669,"14022":643,"14023":669},"state":{"analyse":"regressionNeedstriage","compares":{}},"lastBuildId":"14023","analyseResult":{"details":{"analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"},"base":13732},"aRange":{"up":10,"upIsValue":false,"down":-10,"downIsValue":false},"indexStart":19,"first":1},"summary":{"lastBuildId":14023,"status":"regression","current":{"average":658.8333333333334,"ratio":231.18034899861192,"diff":459.8984220907298},"base":{"average":198.93491124260356}},"averages":[{"start":24,"end":26,"length":3,"average":189.33333333333334},{"start":27,"end":211,"length":185,"average":198.93491124260356,"ratio":0,"diff":0,"status":"similar"},{"start":212,"end":217,"length":6,"average":658.8333333333334,"ratio":231.18034899861192,"diff":459.8984220907298,"status":"regression"}]}} 
\ No newline at end of file diff --git a/dana/configs/db/Training/infos/benchmarks.series.json b/dana/configs/db/Training/infos/benchmarks.series.json deleted file mode 100644 index 695f2eb73b763cff5972bd463da53aaf5ded7b59..0000000000000000000000000000000000000000 --- a/dana/configs/db/Training/infos/benchmarks.series.json +++ /dev/null @@ -1 +0,0 @@ -{"bert_1gpu_0_warmup_runtime_s_":{"status":{"error":"Unable to find first average","lastBuildId":14023},"state":"similarNeedstriage"},"bert_1gpu_0_warmup_throughput_samples_s_":{"status":{"error":"Unable to find first average","lastBuildId":14023},"state":"similarNeedstriage"},"bert_1gpu_0_training_runtime_s_":{"status":{"error":"Unable to find first average","lastBuildId":14023},"state":"similarNeedstriage"},"bert_1gpu_0_training_throughput_samples_s_":{"status":{"error":"Unable to find first average","lastBuildId":14023},"state":"similarNeedstriage"},"bert_1gpu_0_overall_training_runtime_s_":{"status":{"error":"Unable to find first average","lastBuildId":14023},"state":"similarNeedstriage"},"bert_1gpu_0_overall_training_throughput_samles_s_":{"status":{"error":"Unable to find first average","lastBuildId":14020},"state":"similarNeedstriage"},"bert_1gpu_1_warmup_runtime_s_":{"status":{"lastBuildId":14023,"status":"similar","current":{"average":4.30402131875356,"ratio":0,"diff":0},"base":{"average":4.30402131875356}},"state":"similarNeedstriage"},"bert_1gpu_1_warmup_throughput_samples_s_":{"status":{"lastBuildId":14023,"status":"similar","current":{"average":148.70454278133067,"ratio":0,"diff":0},"base":{"average":148.70454278133067}},"state":"similarNeedstriage"},"bert_1gpu_1_training_runtime_s_":{"status":{"lastBuildId":14023,"status":"similar","current":{"average":30.990617950757343,"ratio":0,"diff":0},"base":{"average":30.990617950757343}},"state":"similarNeedstriage"},"bert_1gpu_1_training_throughput_samples_s_":{"status":{"lastBuildId":14023,"status":"similar","current":{"average":172.95741408576032,"ratio":0,"diff":0},"base":{"average":172.95741408576032}},"state":"similarNeedstriage"},"bert_1gpu_1_overall_training_runtime_s_":{"status":{"lastBuildId":14023,"status":"similar","current":{"average":35.29464058081309,"ratio":0,"diff":0},"base":{"average":35.29464058081309}},"state":"similarNeedstriage"},"bert_1gpu_1_overall_training_throughput_samles_s_":{"status":{"error":"Unable to find first average","lastBuildId":14020},"state":"similarNeedstriage"},"bert_1gpu_2_warmup_runtime_s_":{"status":{"error":"Unable to find first average","lastBuildId":14023},"state":"similarNeedstriage"},"bert_1gpu_2_warmup_throughput_samples_s_":{"status":{"error":"Unable to find first average","lastBuildId":14023},"state":"similarNeedstriage"},"bert_1gpu_2_training_runtime_s_":{"status":{"error":"Unable to find first average","lastBuildId":14023},"state":"similarNeedstriage"},"bert_1gpu_2_training_throughput_samples_s_":{"status":{"error":"Unable to find first average","lastBuildId":14023},"state":"similarNeedstriage"},"bert_1gpu_2_overall_training_runtime_s_":{"status":{"error":"Unable to find first average","lastBuildId":14023},"state":"similarNeedstriage"},"bert_1gpu_2_overall_training_throughput_samles_s_":{"status":{"error":"Unable to find first average","lastBuildId":14020},"state":"similarNeedstriage"},"bert_1gpu_3_warmup_runtime_s_":{"status":{"error":"Unable to find first average","lastBuildId":14023},"state":"similarNeedstriage"},"bert_1gpu_3_warmup_throughput_samples_s_":{"status":{"error":"Unable to find first 
average","lastBuildId":14023},"state":"similarNeedstriage"},"bert_1gpu_3_training_runtime_s_":{"status":{"error":"Unable to find first average","lastBuildId":14023},"state":"similarNeedstriage"},"bert_1gpu_3_training_throughput_samples_s_":{"status":{"error":"Unable to find first average","lastBuildId":14023},"state":"similarNeedstriage"},"bert_1gpu_3_overall_training_runtime_s_":{"status":{"error":"Unable to find first average","lastBuildId":14023},"state":"similarNeedstriage"},"bert_1gpu_3_overall_training_throughput_samles_s_":{"status":{"error":"Unable to find first average","lastBuildId":14020},"state":"similarNeedstriage"},"bert_1gpu_0_overall_training_throughput_samples_s_":{"status":{"error":"Unable to find first average","lastBuildId":14023},"state":"similarNeedstriage"},"bert_1gpu_1_overall_training_throughput_samples_s_":{"status":{"error":"Unable to find first average","lastBuildId":14023},"state":"similarNeedstriage"},"bert_1gpu_2_overall_training_throughput_samples_s_":{"status":{"error":"Unable to find first average","lastBuildId":14023},"state":"similarNeedstriage"},"bert_1gpu_3_overall_training_throughput_samples_s_":{"status":{"error":"Unable to find first average","lastBuildId":14023},"state":"similarNeedstriage"}} \ No newline at end of file diff --git a/dana/configs/db/Training/infos/benchmarks.statusSeries.json b/dana/configs/db/Training/infos/benchmarks.statusSeries.json deleted file mode 100644 index 1efd5f2290a2f8fae53795da2c784f9f465786ee..0000000000000000000000000000000000000000 --- a/dana/configs/db/Training/infos/benchmarks.statusSeries.json +++ /dev/null @@ -1 +0,0 @@ -{"0":{"numSeries":24,"numSeriesSimilar":0,"numSeriesImproved":0,"numSeriesRegression":0,"numSeriesUndefined":24,"time":1695223623817},"1":{"numSeries":24,"numSeriesSimilar":0,"numSeriesImproved":0,"numSeriesRegression":0,"numSeriesUndefined":24,"time":1695224061438},"2":{"numSeries":24,"numSeriesSimilar":0,"numSeriesImproved":0,"numSeriesRegression":0,"numSeriesUndefined":24,"time":1695226035839},"3":{"numSeries":28,"numSeriesSimilar":0,"numSeriesImproved":0,"numSeriesRegression":0,"numSeriesUndefined":28,"time":1695226444985},"4":{"numSeries":28,"numSeriesSimilar":5,"numSeriesImproved":0,"numSeriesRegression":0,"numSeriesUndefined":23,"time":1695236320341}} \ No newline at end of file diff --git a/dana/configs/db/Training/infos/builds.json b/dana/configs/db/Training/infos/builds.json deleted file mode 100644 index c3e6474430865dd0b4d58d3c682fcb515741c0fb..0000000000000000000000000000000000000000 --- a/dana/configs/db/Training/infos/builds.json +++ /dev/null @@ -1 +0,0 @@ -{"13713":{"buildId":13713,"infos":{"hash":"e50c9253f3a38d9db56c02d3d8d04e2f20070de8","abbrevHash":"e50c9253","authorName":"amyeroberts","authorEmail":"22614925+amyeroberts@users.noreply.github.com","subject":"YOLOS - reset default return_pixel_mask value (#25559)","url":null}},"13714":{"buildId":13714,"infos":{"hash":"8992589dd6fcfdfc5f1435a4a9a92da501dd5ab6","abbrevHash":"8992589d","authorName":"Yih-Dar","authorEmail":"2521628+ydshieh@users.noreply.github.com","subject":"Skip `test_onnx_runtime_optimize` for now (#25560)","url":null}},"13715":{"buildId":13715,"infos":{"hash":"e7e9261a202dd5623f488f1cb05007e88629f275","abbrevHash":"e7e9261a","authorName":"Younes Belkada","authorEmail":"49240599+younesbelkada@users.noreply.github.com","subject":"[`Docs`] Fix un-rendered images (#25561)","url":null}},"13716":{"buildId":13716,"infos":{"hash":"1791ef8df647a38b4fcb96c14ddd83a43861d713","abbrevHash":"1791ef8d","authorName":"Alex 
McKinney","authorEmail":"44398246+vvvm23@users.noreply.github.com","subject":"Adds `TRANSFORMERS_TEST_DEVICE` (#25506)","url":null}},"13717":{"buildId":13717,"infos":{"hash":"d2871b29754abd0f72cf42c299bb1c041519f7bc","abbrevHash":"d2871b29","authorName":"Yih-Dar","authorEmail":"2521628+ydshieh@users.noreply.github.com","subject":"Skip `test_beam_search_xla_generate_simple` for `T5` (#25566)","url":null}},"13718":{"buildId":13718,"infos":{"hash":"d6bf08f7f6f8bf5d6d94e9b10b7d8203906353ad","abbrevHash":"d6bf08f7","authorName":"Arthur","authorEmail":"48595927+ArthurZucker@users.noreply.github.com","subject":"[`resize_embedding`] Introduce `pad_to_multiple_of` and guidance (#25088)","url":null}},"13719":{"buildId":13719,"infos":{"hash":"5347d00092c4f2429389269dd912417e8daff848","abbrevHash":"5347d000","authorName":"Arthur","authorEmail":"48595927+ArthurZucker@users.noreply.github.com","subject":"[`SwitchTransformers`] Remove unused module (#25427)","url":null}},"13720":{"buildId":13720,"infos":{"hash":"b4d554880013bf97718e1e1332715eeaba7dee17","abbrevHash":"b4d55488","authorName":"Arthur","authorEmail":"48595927+ArthurZucker@users.noreply.github.com","subject":"🚨🚨🚨 [`SPM`] Finish fix spm models 🚨🚨🚨 (#25224)","url":null}},"13721":{"buildId":13721,"infos":{"hash":"9264fc915a3295c6fd0e05f54ee409917ac43f60","abbrevHash":"9264fc91","authorName":"Sina","authorEmail":"sina.moeini@gmail.com","subject":"Inconsistency in PreTrainedModel.resize_token_embeddings When ZeRO3 Is Enabled (#25394)","url":null}},"13722":{"buildId":13722,"infos":{"hash":"181d778f83bf6e58c1d69a7599afb2bb9ceff21e","abbrevHash":"181d778f","authorName":"Arthur","authorEmail":"48595927+ArthurZucker@users.noreply.github.com","subject":"[`NllbMoe`] Update code to properly support loss computation (#25429)","url":null}},"13723":{"buildId":13723,"infos":{"hash":"d4c0aa1443557981a0690c0593be7b0f6ffd53cf","abbrevHash":"d4c0aa14","authorName":"Younes Belkada","authorEmail":"49240599+younesbelkada@users.noreply.github.com","subject":"[`Tests`] Fix failing 8bit test (#25564)","url":null}},"13724":{"buildId":13724,"infos":{"hash":"4e1dee0e8e06c1146d023c43812b88bfe2763329","abbrevHash":"4e1dee0e","authorName":"Marc Sun","authorEmail":"57196510+SunMarc@users.noreply.github.com","subject":"Revert \"change version (#25387)\" (#25573)","url":null}},"13725":{"buildId":13725,"infos":{"hash":"c4c0ceff096473cb4e47ef2f067640bcdf0b32e0","abbrevHash":"c4c0ceff","authorName":"Sourab Mangrulkar","authorEmail":"13534540+pacman100@users.noreply.github.com","subject":"add util for ram efficient loading of model when using fsdp (#25107)","url":null}},"13726":{"buildId":13726,"infos":{"hash":"b8f69d0d10e74c3718e5c79891fdc2a5ac8887d0","abbrevHash":"b8f69d0d","authorName":"Yoach Lacombe","authorEmail":"52246514+ylacombe@users.noreply.github.com","subject":"Add Text-To-Speech pipeline (#24952)","url":null}},"13727":{"buildId":13727,"infos":{"hash":"427adc898ab49c321d58ff4011fa54133adf62c2","abbrevHash":"427adc89","authorName":"Yih-Dar","authorEmail":"2521628+ydshieh@users.noreply.github.com","subject":"Skip `test_contrastive_generate` for `TFXLNet` (#25574)","url":null}},"13728":{"buildId":13728,"infos":{"hash":"4a27c13f1eee26393d60d381e500e1a61970e8ee","abbrevHash":"4a27c13f","authorName":"Marc Sun","authorEmail":"57196510+SunMarc@users.noreply.github.com","subject":"add warning for 8bit optimizers (#25575)","url":null}},"13729":{"buildId":13729,"infos":{"hash":"659ab0423e6492b079d3df131445a39dda0651cb","abbrevHash":"659ab042","authorName":"AmΓ©lie T. 
Reymond","authorEmail":"amelietamrey@gmail.com","subject":"Fix typo in example code (#25583)","url":null}},"13730":{"buildId":13730,"infos":{"hash":"08e32519f8eeeb2e08a0ed8626a5f97b766bb2e8","abbrevHash":"08e32519","authorName":"Kihoon Son","authorEmail":"75935546+kihoon71@users.noreply.github.com","subject":"Suggestions on Pipeline_webserver (#25570)","url":null}},"13731":{"buildId":13731,"infos":{"hash":"940d1a76b0f2ebf98b18326bffc7e4bfc8c416d7","abbrevHash":"940d1a76","authorName":"Younes Belkada","authorEmail":"49240599+younesbelkada@users.noreply.github.com","subject":"[`Docs` / `BetterTransformer` ] Added more details about flash attention + SDPA (#25265)","url":null}},"13732":{"buildId":13732,"infos":{"hash":"c45aab75356563dbb8124aafbc2699853e177873","abbrevHash":"c45aab75","authorName":"Martin Malmsten","authorEmail":"martin@martinmalmsten.net","subject":"Added missing parenthesis in call to is_fsdp_enabled (#25585)","url":null}},"13733":{"buildId":13733,"infos":{"hash":"9d7afd2536ecd9816dd2ea9592a01e52fec17d17","abbrevHash":"9d7afd25","authorName":"Alex McKinney","authorEmail":"44398246+vvvm23@users.noreply.github.com","subject":"Replaces calls to `.cuda` with `.to(torch_device)` in tests (#25571)","url":null}},"13734":{"buildId":13734,"infos":{"hash":"30b3c46ff5a7c9761a800a9ab4bcf8cdb206727e","abbrevHash":"30b3c46f","authorName":"Arthur","authorEmail":"48595927+ArthurZucker@users.noreply.github.com","subject":"[`split_special_tokens`] Add support for `split_special_tokens` argument to encode (#25081)","url":null}},"13735":{"buildId":13735,"infos":{"hash":"bc3e20dcf08a03e22a0e4a42a0ce5a8ec94180e5","abbrevHash":"bc3e20dc","authorName":"Arthur","authorEmail":"48595927+ArthurZucker@users.noreply.github.com","subject":"[`Llama`] remove prompt and fix prefix finetuning (#25565)","url":null}},"13736":{"buildId":13736,"infos":{"hash":"8d2f953f4a59a6a6f337a75ef75bb8a78260ef73","abbrevHash":"8d2f953f","authorName":"Kashif Rasul","authorEmail":"kashif.rasul@gmail.com","subject":"[Time series Informer] fix dtype of cumsum (#25431)","url":null}},"13737":{"buildId":13737,"infos":{"hash":"636acc75b089aa3ce14b48ed3d9d6555565d1a6d","abbrevHash":"636acc75","authorName":"Sourab Mangrulkar","authorEmail":"13534540+pacman100@users.noreply.github.com","subject":"fix z3 init when using accelerate launcher (#25589)","url":null}},"13738":{"buildId":13738,"infos":{"hash":"ef1534252f76231b4a6403c71866d4376e35292d","abbrevHash":"ef153425","authorName":"Arthur","authorEmail":"48595927+ArthurZucker@users.noreply.github.com","subject":"[`TokenizerFast`] Fix setting prefix space in __init__ (#25563)","url":null}},"13739":{"buildId":13739,"infos":{"hash":"faed2ca46fb163082d154aa234fd5d30682d6bf1","abbrevHash":"faed2ca4","authorName":"Younes Belkada","authorEmail":"49240599+younesbelkada@users.noreply.github.com","subject":"[`PEFT`] Peft integration alternative design (#25077)","url":null}},"13740":{"buildId":13740,"infos":{"hash":"6f4424bb086d3d090855862be5aff64eb8ed7101","abbrevHash":"6f4424bb","authorName":"Omar Sanseviero","authorEmail":"osanseviero@gmail.com","subject":"Make TTS automodels importable (#25595)","url":null}},"13741":{"buildId":13741,"infos":{"hash":"4d64157ed3795090110dd8aceb9b7a5ff78bb247","abbrevHash":"4d64157e","authorName":"Hyeonseo Yun","authorEmail":"0525yhs@gmail.com","subject":"🌐 [i18n-KO] Translated `perf_train_tpu_tf.md` to Korean (#25433)","url":null}},"13742":{"buildId":13742,"infos":{"hash":"6c811a322f06a4ce0f3dcf1d6be9e334ee69d8f7","abbrevHash":"6c811a32","authorName":"Stas 
Bekman","authorEmail":"stas00@users.noreply.github.com","subject":"new model: IDEFICS via HuggingFaceM4 (#24796)","url":null}},"13743":{"buildId":13743,"infos":{"hash":"6b82d936d49956ba7b43c5ee590f4868de373b65","abbrevHash":"6b82d936","authorName":"Marc Sun","authorEmail":"57196510+SunMarc@users.noreply.github.com","subject":"reattach hooks when using `resize_token_embeddings` (#25596)","url":null}},"13744":{"buildId":13744,"infos":{"hash":"1982dd3b15867c46e1c20645901b0de469fd935f","abbrevHash":"1982dd3b","authorName":"ydshieh","authorEmail":"ydshieh@users.noreply.github.com","subject":"Hotfix","url":null}},"13745":{"buildId":13745,"infos":{"hash":"f92cc7034a49959b247a46a210b912e56a6f977d","abbrevHash":"f92cc703","authorName":"Sylvain Gugger","authorEmail":"35901082+sgugger@users.noreply.github.com","subject":"Ignore all exceptions from signal in dynamic code (#25623)","url":null}},"13746":{"buildId":13746,"infos":{"hash":"9627c3da4af317a5cf75e6dbdca7e7f94d08e4b0","abbrevHash":"9627c3da","authorName":"Younes Belkada","authorEmail":"49240599+younesbelkada@users.noreply.github.com","subject":"Fix PEFT integration failures on nightly CI (#25624)","url":null}},"13747":{"buildId":13747,"infos":{"hash":"f09db47a71ddef60ccc120b953ee32326c9253a3","abbrevHash":"f09db47a","authorName":"Yih-Dar","authorEmail":"2521628+ydshieh@users.noreply.github.com","subject":"Run doctest for new files (#25588)","url":null}},"13748":{"buildId":13748,"infos":{"hash":"2f8acfea1ca11fe3479fb379ccbded516d0cff57","abbrevHash":"2f8acfea","authorName":"Francisco Kurucz","authorEmail":"juanfkurucz@gmail.com","subject":"Fix test_modeling_mpt typo in model id (#25606)","url":null}},"13749":{"buildId":13749,"infos":{"hash":"5c67682b169576c4859700d551090ff79d450a9a","abbrevHash":"5c67682b","authorName":"Sylvain Gugger","authorEmail":"Sylvain.gugger@gmail.com","subject":"v4.33.0.dev0","url":null}},"13750":{"buildId":13750,"infos":{"hash":"e769ca3d287274143501b2803275367b2bff3e6a","abbrevHash":"e769ca3d","authorName":"Pranith Pashikanti","authorEmail":"117859007+pranith7@users.noreply.github.com","subject":"Added paper links in logitprocess.py (#25482)","url":null}},"13752":{"buildId":13752,"infos":{"hash":"2582bbde2ed3ee1b25c5886df35c07376ee930c4","abbrevHash":"2582bbde","authorName":"Arthur","authorEmail":"48595927+ArthurZucker@users.noreply.github.com","subject":"fix ACT_FN (#25627)","url":null}},"13753":{"buildId":13753,"infos":{"hash":"2df24228d68872d79304b932a68cf56de3061f5b","abbrevHash":"2df24228","authorName":"Yih-Dar","authorEmail":"2521628+ydshieh@users.noreply.github.com","subject":"Skip doctest for some recent files (#25631)","url":null}},"13754":{"buildId":13754,"infos":{"hash":"8608bf2049a10f8d23043e1bb196707a1c1b3fe5","abbrevHash":"8608bf20","authorName":"Rafael Padilla","authorEmail":"31217453+rafaelpadilla@users.noreply.github.com","subject":"🚨🚨🚨 changing default threshold and applying threshold before the rescale (#25608)","url":null}},"13755":{"buildId":13755,"infos":{"hash":"6f041fcbb853adc6c37da85515384ed9a9c5b181","abbrevHash":"6f041fcb","authorName":"mchau","authorEmail":"minhtriet09@gmail.com","subject":"fix documentation for CustomTrainer (#25635)","url":null}},"13756":{"buildId":13756,"infos":{"hash":"450a181d8b963b4e896be4aac701815aa554a6bb","abbrevHash":"450a181d","authorName":"Susnato Dhar","authorEmail":"susnatodhar10@gmail.com","subject":"Add Pop2Piano (#21785)","url":null}},"13757":{"buildId":13757,"infos":{"hash":"58c36bea74ef8f5a4464d04ab2191d0b1bec6de7","abbrevHash":"58c36bea","authorName":"Joe 
Mifsud","authorEmail":"jmif96@gmail.com","subject":"Support specifying revision in push_to_hub (#25578)","url":null}},"13758":{"buildId":13758,"infos":{"hash":"182b83749a7058547e1e882c603cbf97e20259f8","abbrevHash":"182b8374","authorName":"Tanay Mehta","authorEmail":"heyytanay@gmail.com","subject":"Add Number Normalisation for SpeechT5 (#25447)","url":null}},"13759":{"buildId":13759,"infos":{"hash":"6a314ea7cd01a78a58403bc83e7c637ef83e6b26","abbrevHash":"6a314ea7","authorName":"Blake Wyatt","authorEmail":"894305+xNul@users.noreply.github.com","subject":"[DOCS] MusicGen Docs Update (#25510)","url":null}},"13760":{"buildId":13760,"infos":{"hash":"88e51ba30673b42fa93b2e15760dd645d50753f0","abbrevHash":"88e51ba3","authorName":"Christopher Akiki","authorEmail":"christopher.akiki@protonmail.com","subject":"[MINOR:TYPO] (#25646)","url":null}},"13761":{"buildId":13761,"infos":{"hash":"edb28722c2e100a5d43e307bd4c59169c0cf86b8","abbrevHash":"edb28722","authorName":"Sylvain Gugger","authorEmail":"35901082+sgugger@users.noreply.github.com","subject":"Pass the proper token to PEFT integration in auto classes (#25649)","url":null}},"13762":{"buildId":13762,"infos":{"hash":"36291906896904b47692c707471de9a4a963335d","abbrevHash":"36291906","authorName":"Sylvain Gugger","authorEmail":"35901082+sgugger@users.noreply.github.com","subject":"Put IDEFICS in the right section of the doc (#25650)","url":null}},"13763":{"buildId":13763,"infos":{"hash":"62396cff46854dc53023236cfeb785993fa70067","abbrevHash":"62396cff","authorName":"Matt","authorEmail":"Rocketknight1@users.noreply.github.com","subject":"TF 2.14 compatibility (#25630)","url":null}},"13764":{"buildId":13764,"infos":{"hash":"e20fab0bbe8ca5b23738b670d1bd95aafbbbc53c","abbrevHash":"e20fab0b","authorName":"Arthur","authorEmail":"48595927+ArthurZucker@users.noreply.github.com","subject":"Fix bloom add prefix space (#25652)","url":null}},"13765":{"buildId":13765,"infos":{"hash":"fd56f7f0813d412c3e0848cbd6f94a23de2c07b7","abbrevHash":"fd56f7f0","authorName":"Rafael Padilla","authorEmail":"31217453+rafaelpadilla@users.noreply.github.com","subject":"removing unnecesssary extra parameter (#25643)","url":null}},"13766":{"buildId":13766,"infos":{"hash":"5eeaef921f70acd68073d1066ccb09d7c6e6f475","abbrevHash":"5eeaef92","authorName":"Alex McKinney","authorEmail":"44398246+vvvm23@users.noreply.github.com","subject":"Adds `TRANSFORMERS_TEST_BACKEND` (#25655)","url":null}},"13767":{"buildId":13767,"infos":{"hash":"908f853688c4d523780797f27f83af3c10418e92","abbrevHash":"908f8536","authorName":"AleksanderWWW","authorEmail":"alwojnarowicz@gmail.com","subject":"stringify config (#25637)","url":null}},"13768":{"buildId":13768,"infos":{"hash":"977b2f05d5697f33e51111e4834a127a9a76349f","abbrevHash":"977b2f05","authorName":"Gabriel Asher","authorEmail":"85761680+gaasher@users.noreply.github.com","subject":"Add input_embeds functionality to gpt_neo Causal LM (#25659)","url":null}},"13769":{"buildId":13769,"infos":{"hash":"40a0cabd93f86a7c09406159ad03a3804c2940da","abbrevHash":"40a0cabd","authorName":"Yih-Dar","authorEmail":"2521628+ydshieh@users.noreply.github.com","subject":"Update doc toctree (#25661)","url":null}},"13770":{"buildId":13770,"infos":{"hash":"57943630e24651e6d954b912e7fcdb2b4c719cc4","abbrevHash":"57943630","authorName":"Wonhyeong Seo","authorEmail":"wonhseo@kakao.com","subject":"Add Llama2 resources 
(#25531)","url":null}},"13771":{"buildId":13771,"infos":{"hash":"51794bf21ee6c9b9a702a3bceeea167e9518880b","abbrevHash":"51794bf2","authorName":"Arthur","authorEmail":"48595927+ArthurZucker@users.noreply.github.com","subject":"[`SPM`] Patch `spm` Llama and T5 (#25656)","url":null}},"13772":{"buildId":13772,"infos":{"hash":"db587220844538787f560c8a797f1268fef9099d","abbrevHash":"db587220","authorName":"Arthur","authorEmail":"48595927+ArthurZucker@users.noreply.github.com","subject":"[`GPTNeo`] Add input_embeds functionality to gpt_neo Causal LM (#25664)","url":null}},"13773":{"buildId":13773,"infos":{"hash":"3d1edb6c5d36bf6426e72223f534266ff29c45c4","abbrevHash":"3d1edb6c","authorName":"Yih-Dar","authorEmail":"2521628+ydshieh@users.noreply.github.com","subject":"fix wrong path in some doc (#25658)","url":null}},"13774":{"buildId":13774,"infos":{"hash":"b413e0610b42d4c8d9c7a69c06440ad27c69808b","abbrevHash":"b413e061","authorName":"Yih-Dar","authorEmail":"2521628+ydshieh@users.noreply.github.com","subject":"Remove `utils/documentation_tests.txt` (#25680)","url":null}},"13775":{"buildId":13775,"infos":{"hash":"2cf87e2bbb46ff7ec9dd2746d694d41b6815fb43","abbrevHash":"2cf87e2b","authorName":"Nora Belrose","authorEmail":"39116809+norabelrose@users.noreply.github.com","subject":"Prevent Dynamo graph fragmentation in GPTNeoX with torch.baddbmm fix (#24941)","url":null}},"13776":{"buildId":13776,"infos":{"hash":"77cb2ab7921c5b2336916eb7874c807bf86ad33c","abbrevHash":"77cb2ab7","authorName":"Sanchit Gandhi","authorEmail":"93869735+sanchit-gandhi@users.noreply.github.com","subject":"⚠️ [CLAP] Fix dtype of logit scales in init (#25682)","url":null}},"13777":{"buildId":13777,"infos":{"hash":"8657ec68fc01c289245f3c71725353eef055fc3c","abbrevHash":"8657ec68","authorName":"Lysandre Debut","authorEmail":"lysandre.debut@reseau.eseo.fr","subject":"Sets the stalebot to 10 AM CEST (#25678)","url":null}},"13778":{"buildId":13778,"infos":{"hash":"2189a7f54a5ec10a7559a93fa7e6eaca527d2941","abbrevHash":"2189a7f5","authorName":"Yih-Dar","authorEmail":"2521628+ydshieh@users.noreply.github.com","subject":"Fix `pad_token` check condition (#25685)","url":null}},"13779":{"buildId":13779,"infos":{"hash":"6add3b313defc35b5d8ae3d946131aeb625e0441","abbrevHash":"6add3b31","authorName":"sanjeevk-os","authorEmail":"73068589+sanjeevk-os@users.noreply.github.com","subject":"[DOCS] Added docstring example for EpsilonLogitsWarper #24783 (#25378)","url":null}},"13780":{"buildId":13780,"infos":{"hash":"656e17f6f7eded9df87ad59cbd064fdf5f44f708","abbrevHash":"656e17f6","authorName":"Phuc Van Phan","authorEmail":"phanphuc1100@gmail.com","subject":"correct resume training steps number in progress bar (#25691)","url":null}},"13781":{"buildId":13781,"infos":{"hash":"3c2383b1c6eb860c0511d081e670d1782cd66b8d","abbrevHash":"3c2383b1","authorName":"Joao Gante","authorEmail":"joaofranciscocardosogante@gmail.com","subject":"Generate: general test for decoder-only generation from `inputs_embeds` (#25687)","url":null}},"13782":{"buildId":13782,"infos":{"hash":"4d40109c3a93c9b8bbca204cb046ed510f1c72e8","abbrevHash":"4d40109c","authorName":"Susnato Dhar","authorEmail":"susnatodhar10@gmail.com","subject":"Fix typo in `configuration_gpt2.py` (#25676)","url":null}},"13783":{"buildId":13783,"infos":{"hash":"68fa9a5937ae7aa707f5ff2639aa36a37a0a9928","abbrevHash":"68fa9a59","authorName":"Sylvain Gugger","authorEmail":"Sylvain.gugger@gmail.com","subject":"Skip broken 
tests","url":null}},"13784":{"buildId":13784,"infos":{"hash":"b85b88069a778f0ffbb7a0f6389e18fca9432dcf","abbrevHash":"b85b8806","authorName":"Sourab Mangrulkar","authorEmail":"13534540+pacman100@users.noreply.github.com","subject":"fix ram efficient fsdp init (#25686)","url":null}},"13785":{"buildId":13785,"infos":{"hash":"6e6da5e4b860d98d3b625fe5c63db4e83087b6ff","abbrevHash":"6e6da5e4","authorName":"Arthur","authorEmail":"48595927+ArthurZucker@users.noreply.github.com","subject":"[`LlamaTokenizer`] make unk_token_length a property (#25689)","url":null}},"13786":{"buildId":13786,"infos":{"hash":"c2123626aa3cd6c1ae4869ec9bc8869d1a408166","abbrevHash":"c2123626","authorName":"Sylvain Gugger","authorEmail":"35901082+sgugger@users.noreply.github.com","subject":"Update list of persons to tag (#25708)","url":null}},"13787":{"buildId":13787,"infos":{"hash":"f01459c75db47308698b19b8b1bac1ae1159cd31","abbrevHash":"f01459c7","authorName":"Tom Aarsen","authorEmail":"37621491+tomaarsen@users.noreply.github.com","subject":"docs: Resolve typos in warning text (#25711)","url":null}},"13788":{"buildId":13788,"infos":{"hash":"8fff61b9db86ac3ad92deea48d504b5dafc3b78e","abbrevHash":"8fff61b9","authorName":"Yih-Dar","authorEmail":"2521628+ydshieh@users.noreply.github.com","subject":"Fix failing `test_batch_generation` for bloom (#25718)","url":null}},"13789":{"buildId":13789,"infos":{"hash":"70b49f023c9f6579c516671604468a491227b4da","abbrevHash":"70b49f02","authorName":"Younes Belkada","authorEmail":"49240599+younesbelkada@users.noreply.github.com","subject":"[`PEFT`] Fix peft version (#25710)","url":null}},"13790":{"buildId":13790,"infos":{"hash":"2febd506149d039b51590f5dc7b45f0d8624819d","abbrevHash":"2febd506","authorName":"Sylvain Gugger","authorEmail":"35901082+sgugger@users.noreply.github.com","subject":"Fix number of minimal calls to the Hub with peft integration (#25715)","url":null}},"13791":{"buildId":13791,"infos":{"hash":"584eeb5387193d352da976cc3d1305f5c3404850","abbrevHash":"584eeb53","authorName":"Younes Belkada","authorEmail":"49240599+younesbelkada@users.noreply.github.com","subject":"[`AutoGPTQ`] Add correct installation of GPTQ library + fix slow tests (#25713)","url":null}},"13792":{"buildId":13792,"infos":{"hash":"0a365c3e6a0e174302debff4023182838607acf1","abbrevHash":"0a365c3e","authorName":"Joao Gante","authorEmail":"joaofranciscocardosogante@gmail.com","subject":"Generate: nudge towards `do_sample=False` when `temperature=0.0` (#25722)","url":null}},"13793":{"buildId":13793,"infos":{"hash":"fecf08560cd9843b569279dd6f665c987890af4c","abbrevHash":"fecf0856","authorName":"Arthur","authorEmail":"48595927+ArthurZucker@users.noreply.github.com","subject":"[`from_pretrained`] Simpler code for peft (#25726)","url":null}},"13794":{"buildId":13794,"infos":{"hash":"7a6efe1e9f756f585f2ffe5ada22cf6b15edd23b","abbrevHash":"7a6efe1e","authorName":"Stas Bekman","authorEmail":"stas00@users.noreply.github.com","subject":"[idefics] idefics-9b test use 4bit quant (#25734)","url":null}},"13795":{"buildId":13795,"infos":{"hash":"1b2381c46b834a89e447f7a01f0961c4e940d117","abbrevHash":"1b2381c4","authorName":"amyeroberts","authorEmail":"22614925+amyeroberts@users.noreply.github.com","subject":"ImageProcessor - check if input pixel values between 0-255 (#25688)","url":null}},"13796":{"buildId":13796,"infos":{"hash":"fd0b94fd7b0c00c68e2e9f054793287808e33608","abbrevHash":"fd0b94fd","authorName":"Younes Belkada","authorEmail":"49240599+younesbelkada@users.noreply.github.com","subject":"[`from_pretrained`] Fix 
failing PEFT tests (#25733)","url":null}},"13797":{"buildId":13797,"infos":{"hash":"021887682224daf29264f98c759a45e88c82e244","abbrevHash":"02188768","authorName":"Sanchit Gandhi","authorEmail":"93869735+sanchit-gandhi@users.noreply.github.com","subject":"[ASR Pipe Test] Fix CTC timestamps error message (#25727)","url":null}},"13798":{"buildId":13798,"infos":{"hash":"f26099e7b5cf579f99a42bab6ddd371bf2c8d548","abbrevHash":"f26099e7","authorName":"Wonhyeong Seo","authorEmail":"wonhseo@kakao.com","subject":"🌐 [i18n-KO] Translated `visual_question_answering.md` to Korean (#25679)","url":null}},"13799":{"buildId":13799,"infos":{"hash":"ae320fa53f74cc4dfa0e4fc3c95b6129a86b0512","abbrevHash":"ae320fa5","authorName":"Younes Belkada","authorEmail":"49240599+younesbelkada@users.noreply.github.com","subject":"[`PEFT`] Fix PeftConfig save pretrained when calling `add_adapter` (#25738)","url":null}},"13800":{"buildId":13800,"infos":{"hash":"8968fface4e804f380391d880f569578b84b4121","abbrevHash":"8968ffac","authorName":"Anthony Susevski","authorEmail":"77211520+asusevski@users.noreply.github.com","subject":"fixed typo in speech encoder decoder doc (#25745)","url":null}},"13801":{"buildId":13801,"infos":{"hash":"cb8e3ee25fc2349e9262faa1e0c35d80978349fe","abbrevHash":"cb8e3ee2","authorName":"Pedro Cuenca","authorEmail":"pedro@huggingface.co","subject":"Add FlaxCLIPTextModelWithProjection (#25254)","url":null}},"13802":{"buildId":13802,"infos":{"hash":"85cf90a1c92f574ce2eb3fafe0681a3af0a9d41b","abbrevHash":"85cf90a1","authorName":"Joao Gante","authorEmail":"joaofranciscocardosogante@gmail.com","subject":"Generate: add missing logits processors docs (#25653)","url":null}},"13803":{"buildId":13803,"infos":{"hash":"c6a84b72025fa7795f7fb5c97e3de7861a4dfb01","abbrevHash":"c6a84b72","authorName":"Jess","authorEmail":"jessbpeck@gmail.com","subject":"[DOCS] Add example for HammingDiversityLogitsProcessor (#25481)","url":null}},"13804":{"buildId":13804,"infos":{"hash":"494e96d8d61277cd7509e5f90aa14e6ac604063a","abbrevHash":"494e96d8","authorName":"Joao Gante","authorEmail":"joaofranciscocardosogante@gmail.com","subject":"Generate: logits processors are doctested and fix broken doctests (#25692)","url":null}},"13805":{"buildId":13805,"infos":{"hash":"0770ce6cfbcd8334084f9f2c4302e8c71ac931ee","abbrevHash":"0770ce6c","authorName":"Sanchit Gandhi","authorEmail":"93869735+sanchit-gandhi@users.noreply.github.com","subject":"[CLAP] Fix logit scales dtype for fp16 (#25754)","url":null}},"13806":{"buildId":13806,"infos":{"hash":"dd8b7d28aec80013ad2b25ead4200eea1a6a767e","abbrevHash":"dd8b7d28","authorName":"Arthur","authorEmail":"48595927+ArthurZucker@users.noreply.github.com","subject":"[`Sentencepiece`] make sure `legacy` do not require `protobuf` (#25684)","url":null}},"13807":{"buildId":13807,"infos":{"hash":"35c570c80edb9f56aa8339c03d3975847a85cb9d","abbrevHash":"35c570c8","authorName":"Marc Sun","authorEmail":"57196510+SunMarc@users.noreply.github.com","subject":"fix encoder hook (#25735)","url":null}},"13808":{"buildId":13808,"infos":{"hash":"8b0a7bfcdcd51cdf8e7bd9a08ff061fda6bd2b8c","abbrevHash":"8b0a7bfc","authorName":"Joao Gante","authorEmail":"joaofranciscocardosogante@gmail.com","subject":"Docs: fix indentation in `HammingDiversityLogitsProcessor` (#25756)","url":null}},"13809":{"buildId":13809,"infos":{"hash":"4d9e45f3ef624cab41f605d7439862ce23ca806a","abbrevHash":"4d9e45f3","authorName":"David Reguera","authorEmail":"33068707+nablabits@users.noreply.github.com","subject":"Add type hints for several pytorch 
models (batch-3) (#25705)","url":null}},"13810":{"buildId":13810,"infos":{"hash":"4b796978656e461177a83d58ec3c2b06152c63db","abbrevHash":"4b796978","authorName":"Younes Belkada","authorEmail":"49240599+younesbelkada@users.noreply.github.com","subject":"🚨🚨🚨 [`Refactor`] Move third-party related utility files into `integrations/` folder 🚨🚨🚨 (#25599)","url":null}},"13811":{"buildId":13811,"infos":{"hash":"0040469bb8e718f4ffafef829e497805df1aa1fb","abbrevHash":"0040469b","authorName":"Tianlin Liu","authorEmail":"tliu@jacobs-alumni.de","subject":"Correct attention mask dtype for Flax GPT2 (#25636)","url":null}},"13812":{"buildId":13812,"infos":{"hash":"74081cb5fa52540bbdde620942bd3a657af85c8e","abbrevHash":"74081cb5","authorName":"Alan Ji","authorEmail":"hzji210@gmail.com","subject":"fix a typo in docsting (#25759)","url":null}},"13813":{"buildId":13813,"infos":{"hash":"015f8e110d270a0ad42de4ae5b98198d69eb1964","abbrevHash":"015f8e11","authorName":"Arthur","authorEmail":"48595927+ArthurZucker@users.noreply.github.com","subject":"[`CodeLlama`] Add support for `CodeLlama` (#25740)","url":null}},"13814":{"buildId":13814,"infos":{"hash":"960807f62e53676723ab8281019219864ef3db4d","abbrevHash":"960807f6","authorName":"Stas Bekman","authorEmail":"stas00@users.noreply.github.com","subject":"[idefics] small fixes (#25764)","url":null}},"13815":{"buildId":13815,"infos":{"hash":"686c68f64c9d0181bd54d4d2e2446543c3eca1fa","abbrevHash":"686c68f6","authorName":"Tigran Khachatryan","authorEmail":"65066173+Geometrein@users.noreply.github.com","subject":"Add docstrings and fix VIVIT examples (#25628)","url":null}},"13816":{"buildId":13816,"infos":{"hash":"de139702a17003c7dd02e671a9a7417d346c3df2","abbrevHash":"de139702","authorName":"Arthur","authorEmail":"48595927+ArthurZucker@users.noreply.github.com","subject":"[`LlamaFamiliy`] add a tip about dtype (#25794)","url":null}},"13817":{"buildId":13817,"infos":{"hash":"cb91ec67b54c1a8a9a24825165161c90fe7c0e51","abbrevHash":"cb91ec67","authorName":"David Reguera","authorEmail":"33068707+nablabits@users.noreply.github.com","subject":"Add type hints for several pytorch models (batch-2) (#25557)","url":null}},"13818":{"buildId":13818,"infos":{"hash":"ed915cff9751e3e41ebb4733b87c45c938daf116","abbrevHash":"ed915cff","authorName":"David Reguera","authorEmail":"33068707+nablabits@users.noreply.github.com","subject":"Add type hints for pytorch models (final batch) (#25750)","url":null}},"13819":{"buildId":13819,"infos":{"hash":"886b6be081e1bc28e8c6cbc93eba934f83677ab2","abbrevHash":"886b6be0","authorName":"David Reguera","authorEmail":"33068707+nablabits@users.noreply.github.com","subject":"Add type hints for several pytorch models (batch-4) (#25749)","url":null}},"13820":{"buildId":13820,"infos":{"hash":"50573c648ae953dcc1b94d663651f07fb02268f4","abbrevHash":"50573c64","authorName":"Stas Bekman","authorEmail":"stas00@users.noreply.github.com","subject":"[idefics] fix vision's `hidden_act` (#25787)","url":null}},"13821":{"buildId":13821,"infos":{"hash":"738ecd17d869577d263eb1fba3fee0ab8ec5b5a2","abbrevHash":"738ecd17","authorName":"Arup De","authorEmail":"arde@linkedin.com","subject":"Arde/fsdp activation checkpointing (#25771)","url":null}},"13822":{"buildId":13822,"infos":{"hash":"39c37fe45c12bc2f936313330fe5c82319adb6e3","abbrevHash":"39c37fe4","authorName":"Aman Gupta Karmani","authorEmail":"aman@tmm1.net","subject":"Fix incorrect Boolean value in deepspeed example 
(#25788)","url":null}},"13823":{"buildId":13823,"infos":{"hash":"99c3d44906ec448c4559fecdc9a63eda364db4d4","abbrevHash":"99c3d449","authorName":"Lorenzo Battistela","authorEmail":"70359945+Lorenzobattistela@users.noreply.github.com","subject":"fixing name position_embeddings to object_queries (#24652)","url":null}},"13824":{"buildId":13824,"infos":{"hash":"4c21da5e347bfc53ee4ec5b71a23721fefe6822c","abbrevHash":"4c21da5e","authorName":"NielsRogge","authorEmail":"48327001+NielsRogge@users.noreply.github.com","subject":"Add ViTDet (#25524)","url":null}},"13825":{"buildId":13825,"infos":{"hash":"77713d11f6656314fb06c217cf43c4b8f5c64df8","abbrevHash":"77713d11","authorName":"NielsRogge","authorEmail":"48327001+NielsRogge@users.noreply.github.com","subject":"[DINOv2] Add backbone class (#25520)","url":null}},"13826":{"buildId":13826,"infos":{"hash":"c9bae84eb58745784e5cc6491f3f4958ba4706c3","abbrevHash":"c9bae84e","authorName":"Sourab Mangrulkar","authorEmail":"13534540+pacman100@users.noreply.github.com","subject":"Resolving Attribute error when using the FSDP ram efficient feature (#25820)","url":null}},"13827":{"buildId":13827,"infos":{"hash":"dc0c102954ff1f6bcb47de85afea5edc81fc8c7f","abbrevHash":"dc0c1029","authorName":"Younes Belkada","authorEmail":"49240599+younesbelkada@users.noreply.github.com","subject":"[`Docs`] More clarifications on BT + FA (#25823)","url":null}},"13828":{"buildId":13828,"infos":{"hash":"3dd030d264915c71a0bdd23838dbb27156f44ed1","abbrevHash":"3dd030d2","authorName":"zspo","authorEmail":"songpo.zhang@foxmail.com","subject":"fix register (#25779)","url":null}},"13829":{"buildId":13829,"infos":{"hash":"9525515cd40ab2632cf40e1a9d21f7751b02eceb","abbrevHash":"9525515c","authorName":"Omar Sanseviero","authorEmail":"osanseviero@gmail.com","subject":"Minor wording changes for Code Llama (#25815)","url":null}},"13830":{"buildId":13830,"infos":{"hash":"5b5ee235f3239413e9614bd02032b1a203dab710","abbrevHash":"5b5ee235","authorName":"Arthur","authorEmail":"48595927+ArthurZucker@users.noreply.github.com","subject":"[`LlamaTokenizer`] `tokenize` nits. 
(#25793)","url":null}},"13831":{"buildId":13831,"infos":{"hash":"2ee60b757e30815529239c87235a2b794fa60286","abbrevHash":"2ee60b75","authorName":"Dongkeun Yoon","authorEmail":"57797966+MattYoon@users.noreply.github.com","subject":"fix warning trigger for embed_positions when loading xglm (#25798)","url":null}},"13832":{"buildId":13832,"infos":{"hash":"173fa7da9c29c4e3a683ac5d489cde4e7220c98a","abbrevHash":"173fa7da","authorName":"SeongWooChoi","authorEmail":"46990061+nuatmochoi@users.noreply.github.com","subject":"🌐 [i18n-KO] Translated peft.md to Korean (#25706)","url":null}},"13833":{"buildId":13833,"infos":{"hash":"33aa0af70c70d9a8205b0ff0d1d4e68807fbb173","abbrevHash":"33aa0af7","authorName":"MinJae Kang","authorEmail":"39152134+mjk0618@users.noreply.github.com","subject":"🌐 [i18n-KO] `model_memory_anatomy.md` to Korean (#25755)","url":null}},"13834":{"buildId":13834,"infos":{"hash":"483861d52db59cf99219a0281695d1e7e8859218","abbrevHash":"483861d5","authorName":"Chau Nguyen","authorEmail":"60038822+chaumng@users.noreply.github.com","subject":"Error with checking args.eval_accumulation_steps to gather tensors (#25819)","url":null}},"13835":{"buildId":13835,"infos":{"hash":"a35f889acc91cb40bd8c6659691aeb27581a69b1","abbrevHash":"a35f889a","authorName":"Joao Gante","authorEmail":"joaofranciscocardosogante@gmail.com","subject":"Tests: detect lines removed from \"utils/not_doctested.txt\" and doctest ALL generation files (#25763)","url":null}},"13836":{"buildId":13836,"infos":{"hash":"d97fd871e5ba57b23b1775ef2939ffea128dd08d","abbrevHash":"d97fd871","authorName":"heuristicwave","authorEmail":"31366038+heuristicwave@users.noreply.github.com","subject":"🌐 [i18n-KO] Translated `add_new_pipeline.md` to Korean (#25498)","url":null}},"13837":{"buildId":13837,"infos":{"hash":"aade754b27f4496cd67c2d1bfb67ef0df0ffa5d1","abbrevHash":"aade754b","authorName":"Sohyun Sim","authorEmail":"96299403+sim-so@users.noreply.github.com","subject":"🌐 [i18n-KO] TranslatedΒ `community.md` to Korean (#25674)","url":null}},"13838":{"buildId":13838,"infos":{"hash":"245dcc49ef9862a7165aec7be9c4a3299b8d06a1","abbrevHash":"245dcc49","authorName":"Arthur","authorEmail":"48595927+ArthurZucker@users.noreply.github.com","subject":"🀦update warning to If you want to use the new behaviour, set `legacy=… (#25833)","url":null}},"13839":{"buildId":13839,"infos":{"hash":"0e59c93983b84610db9a4d88be1531ba8d745ff9","abbrevHash":"0e59c939","authorName":"Susnato Dhar","authorEmail":"susnatodhar10@gmail.com","subject":"update remaining `Pop2Piano` checkpoints (#25827)","url":null}},"13840":{"buildId":13840,"infos":{"hash":"0daeeb40a10178ce219fffbf41791330524eedc1","abbrevHash":"0daeeb40","authorName":"Sanchit Gandhi","authorEmail":"93869735+sanchit-gandhi@users.noreply.github.com","subject":"[AutoTokenizer] Add data2vec to mapping (#25835)","url":null}},"13841":{"buildId":13841,"infos":{"hash":"ce2d4bc6a1be4f3eb2dc3d1bd564a0892665b2c7","abbrevHash":"ce2d4bc6","authorName":"amyeroberts","authorEmail":"22614925+amyeroberts@users.noreply.github.com","subject":"MaskFormer,Mask2former - reduce memory load (#25741)","url":null}},"13842":{"buildId":13842,"infos":{"hash":"dbc16f4404eca4a75459683d5135f6accea35a02","abbrevHash":"dbc16f44","authorName":"Haylee SchΓ€fer","authorEmail":"mail@inventivetalent.org","subject":"Support loading base64 images in pipelines (#25633)","url":null}},"13843":{"buildId":13843,"infos":{"hash":"8c75cfdaeeb9ae960cfdb0ba780d35add282b2df","abbrevHash":"8c75cfda","authorName":"Nino 
Risteski","authorEmail":"95188570+NinoRisteski@users.noreply.github.com","subject":"Update README.md (#25834)","url":null}},"13844":{"buildId":13844,"infos":{"hash":"07998ef39926b76d3f6667025535d0859eed61c3","abbrevHash":"07998ef3","authorName":"Joao Gante","authorEmail":"joaofranciscocardosogante@gmail.com","subject":"Generate: models with custom `generate()` return `True` in `can_generate()` (#25838)","url":null}},"13845":{"buildId":13845,"infos":{"hash":"1bf2f36daf6731f001ea88ae53ba96acfb6c8497","abbrevHash":"1bf2f36d","authorName":"Nino Risteski","authorEmail":"95188570+NinoRisteski@users.noreply.github.com","subject":"Update README.md (#25832)","url":null}},"13846":{"buildId":13846,"infos":{"hash":"52574026b6740a3882d6dd1cbf1e1663d4cea27b","abbrevHash":"52574026","authorName":"Aman Gupta Karmani","authorEmail":"aman@tmm1.net","subject":"minor typo fix in PeftAdapterMixin docs (#25829)","url":null}},"13847":{"buildId":13847,"infos":{"hash":"62399d6f3568d1436e3e0364a32d13e32bb78cb6","abbrevHash":"62399d6f","authorName":"Yih-Dar","authorEmail":"2521628+ydshieh@users.noreply.github.com","subject":"Add flax installation in daily doctest workflow (#25860)","url":null}},"13848":{"buildId":13848,"infos":{"hash":"09dc99517f5f38ee210cf1145a7b17fc99b37dac","abbrevHash":"09dc9951","authorName":"Juan Pizarro","authorEmail":"jpizarrom@gmail.com","subject":"Add Blip2 model in VQA pipeline (#25532)","url":null}},"13849":{"buildId":13849,"infos":{"hash":"ed290b083751590ba79e3a699608c8e9b70d5d9e","abbrevHash":"ed290b08","authorName":"Lysandre Debut","authorEmail":"lysandre.debut@reseau.eseo.fr","subject":"Remote tools are turned off (#25867)","url":null}},"13850":{"buildId":13850,"infos":{"hash":"f73c20970c5cf575dd341d18216c42bec0b8a0e5","abbrevHash":"f73c2097","authorName":"Yih-Dar","authorEmail":"2521628+ydshieh@users.noreply.github.com","subject":"Fix imports (#25869)","url":null}},"13851":{"buildId":13851,"infos":{"hash":"72298178bcbb5f3cb34af5283ac36dad8b869fb5","abbrevHash":"72298178","authorName":"Marc Sun","authorEmail":"57196510+SunMarc@users.noreply.github.com","subject":"fix max_memory for bnb (#25842)","url":null}},"13852":{"buildId":13852,"infos":{"hash":"459bc6738c162511fabf5b9102171db1fc8bb53e","abbrevHash":"459bc673","authorName":"Joao Gante","authorEmail":"joaofranciscocardosogante@gmail.com","subject":"Docs: fix example failing doctest in `generation_strategies.md ` (#25874)","url":null}},"13853":{"buildId":13853,"infos":{"hash":"9219d1427bf3e868c76fd495bb469cf5e1542242","abbrevHash":"9219d142","authorName":"Yih-Dar","authorEmail":"2521628+ydshieh@users.noreply.github.com","subject":"pin pandas==2.0.3 (#25875)","url":null}},"13854":{"buildId":13854,"infos":{"hash":"1c6f072db0c17c7d82bb0d3b7529d57ebc9a0f2f","abbrevHash":"1c6f072d","authorName":"Yih-Dar","authorEmail":"2521628+ydshieh@users.noreply.github.com","subject":"Reduce CI output (#25876)","url":null}},"13855":{"buildId":13855,"infos":{"hash":"716bb2e3910fd4872064c55b0d8bc3dad754d129","abbrevHash":"716bb2e3","authorName":"NielsRogge","authorEmail":"48327001+NielsRogge@users.noreply.github.com","subject":"[ViTDet] Fix doc tests (#25880)","url":null}},"13856":{"buildId":13856,"infos":{"hash":"f8468b4facb2e46a1766a256b9fe47b0865d6854","abbrevHash":"f8468b4f","authorName":"qihqi","authorEmail":"qihan.dev@gmail.com","subject":"For xla tensors, use an alternative way to get a unique id (#25802)","url":null}},"13857":{"buildId":13857,"infos":{"hash":"e95bcaeef0bd6b084b7615faae411a14d50bcfee","abbrevHash":"e95bcaee","authorName":"Sourab 
Mangrulkar","authorEmail":"13534540+pacman100@users.noreply.github.com","subject":"fix ds z3 checkpointing when `stage3_gather_16bit_weights_on_model_save=False` (#25817)","url":null}},"13858":{"buildId":13858,"infos":{"hash":"99fc3ac8ac2d79f19e983b63c2992b78f4509111","abbrevHash":"99fc3ac8","authorName":"Vibhor Kumar","authorEmail":"vibhor.kumar.me@gmail.com","subject":"Modify efficient GPU training doc with now-available adamw_bnb_8bit optimizer (#25807)","url":null}},"13859":{"buildId":13859,"infos":{"hash":"3b39b906183ed08d9961908eb73104aeea345d11","abbrevHash":"3b39b906","authorName":"Arthur","authorEmail":"48595927+ArthurZucker@users.noreply.github.com","subject":"[`TokenizerFast`] `can_save_slow_tokenizer` as a property for when `vocab_file`'s folder was removed (#25626)","url":null}},"13860":{"buildId":13860,"infos":{"hash":"a39ebbf87978fe3b129efbf9d4a6dfeefcacf08c","abbrevHash":"a39ebbf8","authorName":"Arthur","authorEmail":"48595927+ArthurZucker@users.noreply.github.com","subject":"[`CodeLlama`] Fix CI (#25890)","url":null}},"13861":{"buildId":13861,"infos":{"hash":"2be8a9098e06262bdd5c16b5e8a70f145df88e96","abbrevHash":"2be8a909","authorName":"raghavanone","authorEmail":"115454562+raghavanone@users.noreply.github.com","subject":"Save image_processor while saving pipeline (ImageSegmentationPipeline) (#25884)","url":null}},"13862":{"buildId":13862,"infos":{"hash":"9c5acca0028b550e1328ba7e2f16418fe0a0c634","abbrevHash":"9c5acca0","authorName":"Younes Belkada","authorEmail":"49240599+younesbelkada@users.noreply.github.com","subject":"[`InstructBlip`] FINAL Fix instructblip test (#25887)","url":null}},"13863":{"buildId":13863,"infos":{"hash":"eaf5e98ec03d73c24367438100b05c02ce5ad10c","abbrevHash":"eaf5e98e","authorName":"David Reguera","authorEmail":"33068707+nablabits@users.noreply.github.com","subject":"Add type hints for tf models batch 1 (#25853)","url":null}},"13864":{"buildId":13864,"infos":{"hash":"3fb1535b09901db72a41095c007c29bcdf02e3ae","abbrevHash":"3fb1535b","authorName":"Yih-Dar","authorEmail":"2521628+ydshieh@users.noreply.github.com","subject":"Update `setup.py` (#25893)","url":null}},"13865":{"buildId":13865,"infos":{"hash":"0f08cd205a440d23e6bf924cddd73ff48e09fe35","abbrevHash":"0f08cd20","authorName":"Sylvain Gugger","authorEmail":"35901082+sgugger@users.noreply.github.com","subject":"Smarter check for `is_tensor` (#25871)","url":null}},"13866":{"buildId":13866,"infos":{"hash":"ef10dbce5cbc9a8b6a0a90b04378ca96f4023aa1","abbrevHash":"ef10dbce","authorName":"Marc Sun","authorEmail":"57196510+SunMarc@users.noreply.github.com","subject":"remove torch_dtype override (#25894)","url":null}},"13867":{"buildId":13867,"infos":{"hash":"4ece3b9433ea0bedff0d64fe00623c35766d7d44","abbrevHash":"4ece3b94","authorName":"Matthijs Hollemans","authorEmail":"mail@hollance.com","subject":"add VITS model (#24085)","url":null}},"13868":{"buildId":13868,"infos":{"hash":"024acd271b60568bba214901a9e71d67c44353dc","abbrevHash":"024acd27","authorName":"pkumc","authorEmail":"machijaychou@163.com","subject":"fix FSDP model resume optimizer & scheduler (#25852)","url":null}},"13869":{"buildId":13869,"infos":{"hash":"53e2fd785b2792e20f13189d30d1d4ef7d9cf673","abbrevHash":"53e2fd78","authorName":"Joao Gante","authorEmail":"joaofranciscocardosogante@gmail.com","subject":"Falcon: Add RoPE scaling 
(#25878)","url":null}},"13870":{"buildId":13870,"infos":{"hash":"16d6e3087cd35cb08ee24137900340d6924103dd","abbrevHash":"16d6e308","authorName":"Yih-Dar","authorEmail":"2521628+ydshieh@users.noreply.github.com","subject":"Better error message for pipeline loading (#25912)","url":null}},"13871":{"buildId":13871,"infos":{"hash":"69c5b8f1861bf449339cbcdcd0d0e4a98e9a4c6b","abbrevHash":"69c5b8f1","authorName":"Omar Sanseviero","authorEmail":"osanseviero@gmail.com","subject":"Remove broken docs for MusicGen (#25905)","url":null}},"13872":{"buildId":13872,"infos":{"hash":"be0e189bd3f2b5b960a4062361ead32c055a362e","abbrevHash":"be0e189b","authorName":"Zach Mueller","authorEmail":"muellerzr@gmail.com","subject":"Revert frozen training arguments (#25903)","url":null}},"13873":{"buildId":13873,"infos":{"hash":"b439129e74bb207138e49ffb1f147bd94aa58574","abbrevHash":"b439129e","authorName":"Sanchit Gandhi","authorEmail":"93869735+sanchit-gandhi@users.noreply.github.com","subject":"[VITS] Add to TTA pipeline (#25906)","url":null}},"13874":{"buildId":13874,"infos":{"hash":"1fa2d89a9bb98a15e9720190e07d272a42f03d28","abbrevHash":"1fa2d89a","authorName":"Sanchit Gandhi","authorEmail":"93869735+sanchit-gandhi@users.noreply.github.com","subject":"[MMS] Update docs with HF TTS implementation (#25907)","url":null}},"13875":{"buildId":13875,"infos":{"hash":"3587769c08ffaf42c99f6882d4ad76d3a3669e5e","abbrevHash":"3587769c","authorName":"Sanchit Gandhi","authorEmail":"93869735+sanchit-gandhi@users.noreply.github.com","subject":"[VITS] Only trigger tokenizer warning for uroman (#25915)","url":null}},"13876":{"buildId":13876,"infos":{"hash":"a4dd53d88e4852f023332d284ff07a01afcd5681","abbrevHash":"a4dd53d8","authorName":"Arthur","authorEmail":"48595927+ArthurZucker@users.noreply.github.com","subject":"Update-llama-code (#25826)","url":null}},"13877":{"buildId":13877,"infos":{"hash":"0afa5071bd84e44301750fdc594e33db102cf374","abbrevHash":"0afa5071","authorName":"Nino Risteski","authorEmail":"95188570+NinoRisteski@users.noreply.github.com","subject":"Update model_memory_anatomy.md (#25896)","url":null}},"13878":{"buildId":13878,"infos":{"hash":"ab8cba824e3887d90cb9f4d5866fde9243f2c9fe","abbrevHash":"ab8cba82","authorName":"ydshieh","authorEmail":"ydshieh@users.noreply.github.com","subject":"CI: hotfix (skip VitsModelTest::test_initialization)","url":null}},"13879":{"buildId":13879,"infos":{"hash":"b1d475f6d249d37cb3d1bf417eaf2b1f8cff2b34","abbrevHash":"b1d475f6","authorName":"Yih-Dar","authorEmail":"2521628+ydshieh@users.noreply.github.com","subject":"Skip offload tests for `ViTDet` (#25913)","url":null}},"13880":{"buildId":13880,"infos":{"hash":"0f0e1a2c2bff68541a5b9770d78e0fb6feb7de72","abbrevHash":"0f0e1a2c","authorName":"omahs","authorEmail":"73983677+omahs@users.noreply.github.com","subject":"Fix typos (#25936)","url":null}},"13881":{"buildId":13881,"infos":{"hash":"51e1e8120bc569c3f60f7c73ff6e38a90e6229f7","abbrevHash":"51e1e812","authorName":"Nino Risteski","authorEmail":"95188570+NinoRisteski@users.noreply.github.com","subject":"Update community.md (#25928)","url":null}},"13882":{"buildId":13882,"infos":{"hash":"d4407a3bd13b8ec3978b9ba8e4e45cb11f230437","abbrevHash":"d4407a3b","authorName":"Nino Risteski","authorEmail":"95188570+NinoRisteski@users.noreply.github.com","subject":"Update autoclass_tutorial.md (#25929)","url":null}},"13883":{"buildId":13883,"infos":{"hash":"604a6c51ae0b4ce5e8213ea86ed9c71373223a5d","abbrevHash":"604a6c51","authorName":"Nino 
Risteski","authorEmail":"95188570+NinoRisteski@users.noreply.github.com","subject":"Update README.md (#25941)","url":null}},"13884":{"buildId":13884,"infos":{"hash":"f435003e0c2dd152a2117d11c0ab6fcd4f2d84c0","abbrevHash":"f435003e","authorName":"Sanchit Gandhi","authorEmail":"93869735+sanchit-gandhi@users.noreply.github.com","subject":"[MMS] Fix pip install in docs (#25949)","url":null}},"13885":{"buildId":13885,"infos":{"hash":"eb984418e2f26f749e832730b264d7762e6be8c2","abbrevHash":"eb984418","authorName":"Sanchit Gandhi","authorEmail":"93869735+sanchit-gandhi@users.noreply.github.com","subject":"[VITS] Handle deprecated weight norm (#25946)","url":null}},"13886":{"buildId":13886,"infos":{"hash":"bfb1895e3346cb8a2bf2560c75d45e70edf46a47","abbrevHash":"bfb1895e","authorName":"Omar Sanseviero","authorEmail":"osanseviero@gmail.com","subject":"Import deepspeed utilities from integrations (#25919)","url":null}},"13887":{"buildId":13887,"infos":{"hash":"7cd01d4e384f7ce9c18a81a4decb2c2531542661","abbrevHash":"7cd01d4e","authorName":"Nino Risteski","authorEmail":"95188570+NinoRisteski@users.noreply.github.com","subject":"Update README.md (#25922)","url":null}},"13888":{"buildId":13888,"infos":{"hash":"d750eff62757a46160b6f73b95e8035c49c2971b","abbrevHash":"d750eff6","authorName":"Sanchit Gandhi","authorEmail":"93869735+sanchit-gandhi@users.noreply.github.com","subject":"[VITS] Fix init test (#25945)","url":null}},"13889":{"buildId":13889,"infos":{"hash":"034bc5d26ad7c0e284265d92d3da39d786138545","abbrevHash":"034bc5d2","authorName":"Matt","authorEmail":"Rocketknight1@users.noreply.github.com","subject":"Add proper Falcon docs and conversion script (#25954)","url":null}},"13890":{"buildId":13890,"infos":{"hash":"3a479672ea95b058b621dcdcd1d15b73f36dc25a","abbrevHash":"3a479672","authorName":"Lysandre Debut","authorEmail":"lysandre.debut@reseau.eseo.fr","subject":"Fix failing test (#25963)","url":null}},"13891":{"buildId":13891,"infos":{"hash":"44d2c199f6c5fcc93b662d6d3aaa02f8e79c9c4b","abbrevHash":"44d2c199","authorName":"Yih-Dar","authorEmail":"2521628+ydshieh@users.noreply.github.com","subject":"Fix smart check (#25955)","url":null}},"13892":{"buildId":13892,"infos":{"hash":"040c4613c2fac59f16e333a630d9a69b6ff9ca5d","abbrevHash":"040c4613","authorName":"David Reguera","authorEmail":"33068707+nablabits@users.noreply.github.com","subject":"Add type hints for tf models final batch (#25883)","url":null}},"13893":{"buildId":13893,"infos":{"hash":"22a69f1d7d520d5fbccbdb163d05db56bf79724c","abbrevHash":"22a69f1d","authorName":"Lysandre Debut","authorEmail":"lysandre.debut@reseau.eseo.fr","subject":"Put Falcon back (#25960)","url":null}},"13894":{"buildId":13894,"infos":{"hash":"49b69fe0d4885e258dbf657e35c445a94ffd09ae","abbrevHash":"49b69fe0","authorName":"Younes Belkada","authorEmail":"49240599+younesbelkada@users.noreply.github.com","subject":"[`Falcon`] Remove SDPA for falcon to support earlier versions of PyTorch (< 2.0) (#25947)","url":null}},"13895":{"buildId":13895,"infos":{"hash":"d8e13b3e04da9e61c6f16df43815656f59688abd","abbrevHash":"d8e13b3e","authorName":"Lysandre","authorEmail":"lysandre@huggingface.co","subject":"v4.34.dev.0","url":null}},"13896":{"buildId":13896,"infos":{"hash":"404ff8fc17599788a546818373be113b1fc8456a","abbrevHash":"404ff8fc","authorName":"Susnato Dhar","authorEmail":"susnatodhar10@gmail.com","subject":"Fix typo 
(#25966)","url":null}},"13897":{"buildId":13897,"infos":{"hash":"feec56959afe480e57b2acc177111ae18a5ea757","abbrevHash":"feec5695","authorName":"Yih-Dar","authorEmail":"2521628+ydshieh@users.noreply.github.com","subject":"Fix Detr CI (#25972)","url":null}},"13898":{"buildId":13898,"infos":{"hash":"fbbe1b8a406a09b47673f606f0af6f3d5e045575","abbrevHash":"fbbe1b8a","authorName":"Yih-Dar","authorEmail":"2521628+ydshieh@users.noreply.github.com","subject":"Fix `test_load_img_url_timeout` (#25976)","url":null}},"13899":{"buildId":13899,"infos":{"hash":"1cc3bc22fed6ffc5937cf66c799dd97840622e69","abbrevHash":"1cc3bc22","authorName":"Huazhong Ji","authorEmail":"hzji210@gmail.com","subject":"nn.Identity is not required to be compatible with PyTorch < 1.1.0 as the minimum PyTorch version we currently support is 1.10.0 (#25974)","url":null}},"13900":{"buildId":13900,"infos":{"hash":"52a46dc57bb653aa9dab440e4bb70988b15cdc7e","abbrevHash":"52a46dc5","authorName":"Susnato Dhar","authorEmail":"susnatodhar10@gmail.com","subject":"Add `Pop2Piano` space demo. (#25975)","url":null}},"13901":{"buildId":13901,"infos":{"hash":"6f125aaa4807d84e9004ce79035c7653aedfd630","abbrevHash":"6f125aaa","authorName":"Kai","authorEmail":"140378742+kai01ai@users.noreply.github.com","subject":"fix typo (#25981)","url":null}},"13902":{"buildId":13902,"infos":{"hash":"391f26459ab1a392aedc82e0546ce5f88acb7cd5","abbrevHash":"391f2645","authorName":"Yih-Dar","authorEmail":"2521628+ydshieh@users.noreply.github.com","subject":"Use main in conversion script (#25973)","url":null}},"13903":{"buildId":13903,"infos":{"hash":"6316ce8d2703f210b91853aba90d44755a241334","abbrevHash":"6316ce8d","authorName":"Julien Chaumond","authorEmail":"julien@huggingface.co","subject":"[doc] Always call it Agents for consistency (#25958)","url":null}},"13904":{"buildId":13904,"infos":{"hash":"7011cd8667d7a51bd608e6a722f061d5ac5f4166","abbrevHash":"7011cd86","authorName":"Traun Leyden","authorEmail":"traun.leyden@gmail.com","subject":"Update RAG README.md with correct path to examples/seq2seq (#25953)","url":null}},"13905":{"buildId":13905,"infos":{"hash":"aea761499f4b1193f2706f471442da6f9df65d65","abbrevHash":"aea76149","authorName":"Sahel Sharify","authorEmail":"sahel.sharifi@gmail.com","subject":"Update training_args.py to remove the runtime error (#25920)","url":null}},"13906":{"buildId":13906,"infos":{"hash":"9a70d6e56f2801c9a3aa80ca97e6a32024db72b7","abbrevHash":"9a70d6e5","authorName":"Joao Gante","authorEmail":"joaofranciscocardosogante@gmail.com","subject":"Trainer: delegate default generation values to `generation_config` (#25987)","url":null}},"13907":{"buildId":13907,"infos":{"hash":"aa5c94d38deb3960e809b75bc959dc4357d3dd2b","abbrevHash":"aa5c94d3","authorName":"Yih-Dar","authorEmail":"2521628+ydshieh@users.noreply.github.com","subject":"Show failed tests on CircleCI layout in a better way (#25895)","url":null}},"13908":{"buildId":13908,"infos":{"hash":"70a98024b1b0007d2d8bdced854cd9b638dbb07b","abbrevHash":"70a98024","authorName":"Abhilash Majumder","authorEmail":"30946547+abhilash1910@users.noreply.github.com","subject":"Patch with accelerate xpu (#25714)","url":null}},"13909":{"buildId":13909,"infos":{"hash":"da1af21dbbc48ad4f6f0b27635cd3993ddc22b55","abbrevHash":"da1af21d","authorName":"andreeahedes","authorEmail":"53334746+andreeahedes@users.noreply.github.com","subject":"PegasusX add _no_split_modules 
(#25933)","url":null}},"13910":{"buildId":13910,"infos":{"hash":"1110b565d62e56105c8e5e4e2848bfbf469f8200","abbrevHash":"1110b565","authorName":"raghavanone","authorEmail":"115454562+raghavanone@users.noreply.github.com","subject":"Add TFDebertaV2ForMultipleChoice (#25932)","url":null}},"13911":{"buildId":13911,"infos":{"hash":"6bc517ccd4a3bcda4d0621d54a37c3e047df223a","abbrevHash":"6bc517cc","authorName":"Sourab Mangrulkar","authorEmail":"13534540+pacman100@users.noreply.github.com","subject":"deepspeed resume from ckpt fixes and adding support for deepspeed optimizer and HF scheduler (#25863)","url":null}},"13912":{"buildId":13912,"infos":{"hash":"8d518013efbd10c178dd0dba0f9ba93229e2e78a","abbrevHash":"8d518013","authorName":"Sanchit Gandhi","authorEmail":"93869735+sanchit-gandhi@users.noreply.github.com","subject":"[Wav2Vec2 Conformer] Fix inference float16 (#25985)","url":null}},"13913":{"buildId":13913,"infos":{"hash":"6206f599e1f45b619f72f9d194929e545549416f","abbrevHash":"6206f599","authorName":"Injin Paek","authorEmail":"71638597+eenzeenee@users.noreply.github.com","subject":"Add LLaMA resources (#25859)","url":null}},"13914":{"buildId":13914,"infos":{"hash":"d0354e5e86842b757cec1ecb7de314a1f2421c1e","abbrevHash":"d0354e5e","authorName":"Arthur","authorEmail":"48595927+ArthurZucker@users.noreply.github.com","subject":"[`CI`] Fix red CI and ERROR failed should show (#25995)","url":null}},"13915":{"buildId":13915,"infos":{"hash":"4fa0aff21ee083d0197a898cdf17ff476fae2ac3","abbrevHash":"4fa0aff2","authorName":"Arthur","authorEmail":"48595927+ArthurZucker@users.noreply.github.com","subject":"[`VITS`] tokenizer integration test: fix revision did not exist (#25996)","url":null}},"13916":{"buildId":13916,"infos":{"hash":"b8def689346c45958268ec389ee6242bddc6d78c","abbrevHash":"b8def689","authorName":"Tanay Mehta","authorEmail":"heyytanay@gmail.com","subject":"Fix Mega chunking error when using decoder-only model (#25765)","url":null}},"13917":{"buildId":13917,"infos":{"hash":"172f42c512e1bf32554ef910fe82f07916b4d4af","abbrevHash":"172f42c5","authorName":"tju_skywalker","authorEmail":"929019882@qq.com","subject":"save space when converting hf model to megatron model. 
(#25950)","url":null}},"13918":{"buildId":13918,"infos":{"hash":"f6295c6c535c2b036a4533327ab5a92c6b199b78","abbrevHash":"f6295c6c","authorName":"Nino Risteski","authorEmail":"95188570+NinoRisteski@users.noreply.github.com","subject":"Update README.md (#26003)","url":null}},"13919":{"buildId":13919,"infos":{"hash":"f6301b9a13b8467d1f88a6f419d76aefa15bd9b8","abbrevHash":"f6301b9a","authorName":"Lysandre Debut","authorEmail":"lysandre.debut@reseau.eseo.fr","subject":"Falcon: fix revision propagation (#26006)","url":null}},"13920":{"buildId":13920,"infos":{"hash":"842e99f1b9ee2a0fa239997ef695c5ed0bd77195","abbrevHash":"842e99f1","authorName":"Matt","authorEmail":"Rocketknight1@users.noreply.github.com","subject":"TF-OPT attention mask fixes (#25238)","url":null}},"13921":{"buildId":13921,"infos":{"hash":"3e203f92bed937fa13c35adee1bdc45a92d18e61","abbrevHash":"3e203f92","authorName":"zspo","authorEmail":"songpo.zhang@foxmail.com","subject":"Fix small typo README.md (#25934)","url":null}},"13922":{"buildId":13922,"infos":{"hash":"fa522d8d7ba512d1e103f891263602ee3f2bd46d","abbrevHash":"fa522d8d","authorName":"Harheem Kim","authorEmail":"49297157+harheem@users.noreply.github.com","subject":"🌐[i18n-KO] Translated `llm_tutorial.md` to Korean (#25791)","url":null}},"13923":{"buildId":13923,"infos":{"hash":"300d6a4a62aac89b3f439110561d5a2268ffad9e","abbrevHash":"300d6a4a","authorName":"Matt","authorEmail":"Rocketknight1@users.noreply.github.com","subject":"Remove Falcon from undocumented list (#26008)","url":null}},"13924":{"buildId":13924,"infos":{"hash":"fa6107c97edf7cf725305a34735a57875b67d85e","abbrevHash":"fa6107c9","authorName":"Marc Sun","authorEmail":"57196510+SunMarc@users.noreply.github.com","subject":"modify context length for GPTQ + version bump (#25899)","url":null}},"13925":{"buildId":13925,"infos":{"hash":"e3a9716384146b89f21a39bdf13dd4b1cac740bb","abbrevHash":"e3a97163","authorName":"Zach Mueller","authorEmail":"muellerzr@gmail.com","subject":"Fix err with FSDP (#25991)","url":null}},"14018":{"buildId":14018,"infos":{"hash":"f29fe7458953dbf00addaf793d95ea1965bc8441","abbrevHash":"f29fe745","authorName":"Matt","authorEmail":"Rocketknight1@users.noreply.github.com","subject":"Rewrite for custom code warning messages (#26291)","url":null}},"14019":{"buildId":14019,"infos":{"hash":"245532065d3ceddf1c0f8cb3e60ab6451861100a","abbrevHash":"24553206","authorName":"fxmarty","authorEmail":"9808326+fxmarty@users.noreply.github.com","subject":"fix deepspeed available detection (#26252)","url":null}},"14020":{"buildId":14020,"infos":{"hash":"00247ea0dec9b2219a43973a2d90c059dfa1df17","abbrevHash":"00247ea0","authorName":"Jinho Park","authorEmail":"jinhoparkseoul@gmail.com","subject":"add bbox input validation (#26294)","url":null}},"14021":{"buildId":14021,"infos":{"hash":"f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609","abbrevHash":"f94c9b3d","authorName":"Arthur","authorEmail":"48595927+ArthurZucker@users.noreply.github.com","subject":"include changes from llama (#26260)","url":null}},"14022":{"buildId":14022,"infos":{"hash":"0b5024ce725a0f6b6d8cfe740e7a2a6021257c37","abbrevHash":"0b5024ce","authorName":"Younes Belkada","authorEmail":"49240599+younesbelkada@users.noreply.github.com","subject":"[`Trainer`] Refactor trainer + bnb logic (#26248)","url":null}},"14023":{"buildId":14023,"infos":{"hash":"e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d","abbrevHash":"e3a4bd2b","authorName":"Shijie Wu","authorEmail":"swu671@bloomberg.net","subject":"add custom RMSNorm to `ALL_LAYERNORM_LAYERS` (#26227)","url":null}}} \ 
No newline at end of file diff --git a/dana/configs/db/Training/series/bert_1gpu_/0_overall_/training_r/untime_s_.json b/dana/configs/db/Training/series/bert_1gpu_/0_overall_/training_r/untime_s_.json deleted file mode 100644 index d51949d0b25cb3e64512708199d13ff3f0f2717d..0000000000000000000000000000000000000000 --- a/dana/configs/db/Training/series/bert_1gpu_/0_overall_/training_r/untime_s_.json +++ /dev/null @@ -1 +0,0 @@ -{"projectId":"Training","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"14018":65.88671660423279,"14019":73.87720775604248,"14020":65.70985412597656,"14021":74.21420788764954,"14022":65.80531525611877,"14023":84.21697664260864},"state":{"analyse":"similarNeedstriage","compares":{}},"lastBuildId":"14023","analyseResult":{"summary":{"error":"Unable to find first average","lastBuildId":14023}}} \ No newline at end of file diff --git a/dana/configs/db/Training/series/bert_1gpu_/0_overall_/training_t/hroughput_/samles_s_.json b/dana/configs/db/Training/series/bert_1gpu_/0_overall_/training_t/hroughput_/samles_s_.json deleted file mode 100644 index b17a6cde2bb9abd786e54c1fe26e4d6be1192537..0000000000000000000000000000000000000000 --- a/dana/configs/db/Training/series/bert_1gpu_/0_overall_/training_t/hroughput_/samles_s_.json +++ /dev/null @@ -1 +0,0 @@ -{"projectId":"Training","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"14018":81.35175459108635,"14019":72.55282329700124,"14020":81.57071829324109},"state":{"analyse":"similarNeedstriage","compares":{}},"lastBuildId":"14020","analyseResult":{"summary":{"error":"Unable to find first average","lastBuildId":14020}}} \ No newline at end of file diff --git a/dana/configs/db/Training/series/bert_1gpu_/0_overall_/training_t/hroughput_/samples_s_.json b/dana/configs/db/Training/series/bert_1gpu_/0_overall_/training_t/hroughput_/samples_s_.json deleted file mode 100644 index dbd55b102a981756c91d3300c4ae7ab4ae6f46ad..0000000000000000000000000000000000000000 --- a/dana/configs/db/Training/series/bert_1gpu_/0_overall_/training_t/hroughput_/samples_s_.json +++ /dev/null @@ -1 +0,0 @@ -{"projectId":"Training","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"14021":72.22336736537468,"14022":81.4523869255624,"14023":63.64512493420676},"state":{"analyse":"similarNeedstriage","compares":{}},"lastBuildId":"14023","analyseResult":{"summary":{"error":"Unable to find first average","lastBuildId":14023}}} \ No newline at end of file diff --git a/dana/configs/db/Training/series/bert_1gpu_/0_training/_runtime_s/_.json b/dana/configs/db/Training/series/bert_1gpu_/0_training/_runtime_s/_.json deleted file mode 100644 index d5a6a6940172132581b6a37e40ec6235d1b2fcc6..0000000000000000000000000000000000000000 --- a/dana/configs/db/Training/series/bert_1gpu_/0_training/_runtime_s/_.json +++ /dev/null @@ -1 +0,0 @@ -{"projectId":"Training","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"14018":58.791839599609375,"14019":65.8548059463501,"14020":58.61403751373291,"14021":66.19549345970154,"14022":58.7314658164978,"14023":75.10427641868591},"state":{"analyse":"similarNeedstriage","compares":{}},"lastBuildId":"14023","analyseResult":{"summary":{"error":"Unable to find first average","lastBuildId":14023}}} \ No newline at end of file diff --git 
a/dana/configs/db/Training/series/bert_1gpu_/0_training/_throughpu/t_samples_/s_.json b/dana/configs/db/Training/series/bert_1gpu_/0_training/_throughpu/t_samples_/s_.json deleted file mode 100644 index aed70bfeb8b8cb5b0ff1c98e864e76238815e0cd..0000000000000000000000000000000000000000 --- a/dana/configs/db/Training/series/bert_1gpu_/0_training/_throughpu/t_samples_/s_.json +++ /dev/null @@ -1 +0,0 @@ -{"projectId":"Training","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"14018":91.1691152463209,"14019":81.39117446290295,"14020":91.44567116271737,"14021":80.97227952931658,"14022":91.26283373799882,"14023":71.36744078485569},"state":{"analyse":"similarNeedstriage","compares":{}},"lastBuildId":"14023","analyseResult":{"summary":{"error":"Unable to find first average","lastBuildId":14023}}} \ No newline at end of file diff --git a/dana/configs/db/Training/series/bert_1gpu_/0_warmup_r/untime_s_.json b/dana/configs/db/Training/series/bert_1gpu_/0_warmup_r/untime_s_.json deleted file mode 100644 index 0528e4812fdec8aca8fe942b766bbecb0b66d470..0000000000000000000000000000000000000000 --- a/dana/configs/db/Training/series/bert_1gpu_/0_warmup_r/untime_s_.json +++ /dev/null @@ -1 +0,0 @@ -{"projectId":"Training","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"14018":7.094875812530518,"14019":8.022400379180908,"14020":7.095812559127808,"14021":8.018710851669312,"14022":7.073848247528076,"14023":9.112696647644043},"state":{"analyse":"similarNeedstriage","compares":{}},"lastBuildId":"14023","analyseResult":{"summary":{"error":"Unable to find first average","lastBuildId":14023}}} \ No newline at end of file diff --git a/dana/configs/db/Training/series/bert_1gpu_/0_warmup_t/hroughput_/samples_s_.json b/dana/configs/db/Training/series/bert_1gpu_/0_warmup_t/hroughput_/samples_s_.json deleted file mode 100644 index 433025531a2236eb7d4e1561cb173c912ca0ff37..0000000000000000000000000000000000000000 --- a/dana/configs/db/Training/series/bert_1gpu_/0_warmup_t/hroughput_/samples_s_.json +++ /dev/null @@ -1 +0,0 @@ -{"projectId":"Training","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"14018":90.20594819569256,"14019":79.77662167807989,"14020":90.19403974767148,"14021":79.81332808212765,"14022":90.47409240417973,"14023":70.23168056027222},"state":{"analyse":"similarNeedstriage","compares":{}},"lastBuildId":"14023","analyseResult":{"summary":{"error":"Unable to find first average","lastBuildId":14023}}} \ No newline at end of file diff --git a/dana/configs/db/Training/series/bert_1gpu_/1_overall_/training_r/untime_s_.json b/dana/configs/db/Training/series/bert_1gpu_/1_overall_/training_r/untime_s_.json deleted file mode 100644 index 4ef73ac9270a833feb22fa7fcb7f5043a29f270c..0000000000000000000000000000000000000000 --- a/dana/configs/db/Training/series/bert_1gpu_/1_overall_/training_r/untime_s_.json +++ /dev/null @@ -1 +0,0 @@ 
-{"projectId":"Training","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"14018":35.39404010772705,"14019":35.39086174964905,"14020":35.434736013412476,"14021":35.20041847229004,"14022":35.14369058609009,"14023":35.20409655570984},"state":{"analyse":"similarNeedstriage","compares":{}},"lastBuildId":"14023","analyseResult":{"details":{"analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"aRange":{"up":10,"upIsValue":false,"down":-10,"downIsValue":false},"indexStart":0,"first":0},"summary":{"lastBuildId":14023,"status":"similar","current":{"average":35.29464058081309,"ratio":0,"diff":0},"base":{"average":35.29464058081309}},"averages":[{"start":0,"end":5,"length":6,"average":35.29464058081309,"ratio":0,"diff":0,"status":"similar"}]}} \ No newline at end of file diff --git a/dana/configs/db/Training/series/bert_1gpu_/1_overall_/training_t/hroughput_/samles_s_.json b/dana/configs/db/Training/series/bert_1gpu_/1_overall_/training_t/hroughput_/samles_s_.json deleted file mode 100644 index 7b63d737be7dd3b3daed60694fbf44887adbafc3..0000000000000000000000000000000000000000 --- a/dana/configs/db/Training/series/bert_1gpu_/1_overall_/training_t/hroughput_/samles_s_.json +++ /dev/null @@ -1 +0,0 @@ -{"projectId":"Training","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"14018":151.43792524634202,"14019":151.4515254789791,"14020":151.26400258692982},"state":{"analyse":"similarNeedstriage","compares":{}},"lastBuildId":"14020","analyseResult":{"summary":{"error":"Unable to find first average","lastBuildId":14020}}} \ No newline at end of file diff --git a/dana/configs/db/Training/series/bert_1gpu_/1_overall_/training_t/hroughput_/samples_s_.json b/dana/configs/db/Training/series/bert_1gpu_/1_overall_/training_t/hroughput_/samples_s_.json deleted file mode 100644 index 0dc45733149894ac06d62afcfa64589ac1d73ae2..0000000000000000000000000000000000000000 --- a/dana/configs/db/Training/series/bert_1gpu_/1_overall_/training_t/hroughput_/samples_s_.json +++ /dev/null @@ -1 +0,0 @@ -{"projectId":"Training","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"14021":152.2709170125185,"14022":152.51670813769042,"14023":152.25500792266882},"state":{"analyse":"similarNeedstriage","compares":{}},"lastBuildId":"14023","analyseResult":{"summary":{"error":"Unable to find first average","lastBuildId":14023}}} \ No newline at end of file diff --git a/dana/configs/db/Training/series/bert_1gpu_/1_training/_runtime_s/_.json b/dana/configs/db/Training/series/bert_1gpu_/1_training/_runtime_s/_.json deleted file mode 100644 index 379e3baff6360ec6d1ca88a4eb14e780d6ea5d1c..0000000000000000000000000000000000000000 --- a/dana/configs/db/Training/series/bert_1gpu_/1_training/_runtime_s/_.json +++ /dev/null @@ -1 +0,0 @@ 
-{"projectId":"Training","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"14018":31.10353970527649,"14019":31.091834545135498,"14020":31.070497274398804,"14021":30.926475286483765,"14022":30.848360061645508,"14023":30.903000831604004},"state":{"analyse":"similarNeedstriage","compares":{}},"lastBuildId":"14023","analyseResult":{"details":{"analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"aRange":{"up":10,"upIsValue":false,"down":-10,"downIsValue":false},"indexStart":0,"first":0},"summary":{"lastBuildId":14023,"status":"similar","current":{"average":30.990617950757343,"ratio":0,"diff":0},"base":{"average":30.990617950757343}},"averages":[{"start":0,"end":5,"length":6,"average":30.990617950757343,"ratio":0,"diff":0,"status":"similar"}]}} \ No newline at end of file diff --git a/dana/configs/db/Training/series/bert_1gpu_/1_training/_throughpu/t_samples_/s_.json b/dana/configs/db/Training/series/bert_1gpu_/1_training/_throughpu/t_samples_/s_.json deleted file mode 100644 index 15d7e4a512459df8bc7d84b97d26b37838603a4e..0000000000000000000000000000000000000000 --- a/dana/configs/db/Training/series/bert_1gpu_/1_training/_throughpu/t_samples_/s_.json +++ /dev/null @@ -1 +0,0 @@ -{"projectId":"Training","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"14018":172.3276530835079,"14019":172.39252937033925,"14020":172.51091775787205,"14021":173.3142865570121,"14022":173.75315865377928,"14023":173.44593909205133},"state":{"analyse":"similarNeedstriage","compares":{}},"lastBuildId":"14023","analyseResult":{"details":{"analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"aRange":{"up":10,"upIsValue":false,"down":-10,"downIsValue":false},"indexStart":0,"first":0},"summary":{"lastBuildId":14023,"status":"similar","current":{"average":172.95741408576032,"ratio":0,"diff":0},"base":{"average":172.95741408576032}},"averages":[{"start":0,"end":5,"length":6,"average":172.95741408576032,"ratio":0,"diff":0,"status":"similar"}]}} \ No newline at end of file diff --git a/dana/configs/db/Training/series/bert_1gpu_/1_warmup_r/untime_s_.json b/dana/configs/db/Training/series/bert_1gpu_/1_warmup_r/untime_s_.json deleted file mode 100644 index 9cf0883066055f84ed13a6897aad1cb0ec9c3a2c..0000000000000000000000000000000000000000 --- a/dana/configs/db/Training/series/bert_1gpu_/1_warmup_r/untime_s_.json +++ /dev/null @@ -1 +0,0 @@ -{"projectId":"Training","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"14018":4.290498971939087,"14019":4.299026012420654,"14020":4.364237308502197,"14021":4.273942232131958,"14022":4.295328855514526,"14023":4.3010945320129395},"state":{"analyse":"similarNeedstriage","compares":{}},"lastBuildId":"14023","analyseResult":{"details":{"analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"aRange":{"up":10,"upIsValue":false,"down":-10,"downIsValue":false},"indexStart":0,"first":0},"summary":{"lastBuildId":14023,"status":"similar","current":{"average":4.30402131875356,"ratio":0,"diff":0},"base":{"average":4.30402131875356}},"averages":[{"start":0,"end":5,"length":6,"average":4.30402131875356,"ratio":0,"diff":0,"status":"similar"}]}} \ No newline at end of file diff --git a/dana/configs/db/Training/series/bert_1gpu_/1_warmup_t/hroughput_/samples_s_.json b/dana/configs/db/Training/series/bert_1gpu_/1_warmup_t/hroughput_/samples_s_.json deleted file mode 100644 index 
3db4e2519c9a5bcb7b0029a0385caab07545bd25..0000000000000000000000000000000000000000 --- a/dana/configs/db/Training/series/bert_1gpu_/1_warmup_t/hroughput_/samples_s_.json +++ /dev/null @@ -1 +0,0 @@ -{"projectId":"Training","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"14018":149.16679952279597,"14019":148.87092986898094,"14020":146.64647102328345,"14021":149.74465382063684,"14022":148.99906887882187,"14023":148.79933357346508},"state":{"analyse":"similarNeedstriage","compares":{}},"lastBuildId":"14023","analyseResult":{"details":{"analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"aRange":{"up":10,"upIsValue":false,"down":-10,"downIsValue":false},"indexStart":0,"first":0},"summary":{"lastBuildId":14023,"status":"similar","current":{"average":148.70454278133067,"ratio":0,"diff":0},"base":{"average":148.70454278133067}},"averages":[{"start":0,"end":5,"length":6,"average":148.70454278133067,"ratio":0,"diff":0,"status":"similar"}]}} \ No newline at end of file diff --git a/dana/configs/db/Training/series/bert_1gpu_/2_overall_/training_r/untime_s_.json b/dana/configs/db/Training/series/bert_1gpu_/2_overall_/training_r/untime_s_.json deleted file mode 100644 index 957c8d871452deacac153fe6bf5bacab440d36fb..0000000000000000000000000000000000000000 --- a/dana/configs/db/Training/series/bert_1gpu_/2_overall_/training_r/untime_s_.json +++ /dev/null @@ -1 +0,0 @@ -{"projectId":"Training","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"14018":65.32293772697449,"14019":89.21672701835632,"14020":76.37052369117737,"14021":88.50410795211792,"14022":63.03644561767578,"14023":85.65015506744385},"state":{"analyse":"similarNeedstriage","compares":{}},"lastBuildId":"14023","analyseResult":{"summary":{"error":"Unable to find first average","lastBuildId":14023}}} \ No newline at end of file diff --git a/dana/configs/db/Training/series/bert_1gpu_/2_overall_/training_t/hroughput_/samles_s_.json b/dana/configs/db/Training/series/bert_1gpu_/2_overall_/training_t/hroughput_/samles_s_.json deleted file mode 100644 index 3278caea16c3e27fa0edbbf98c67d14786466754..0000000000000000000000000000000000000000 --- a/dana/configs/db/Training/series/bert_1gpu_/2_overall_/training_t/hroughput_/samles_s_.json +++ /dev/null @@ -1 +0,0 @@ -{"projectId":"Training","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"14018":72.99120593639651,"14019":53.44289304649099,"14020":62.43246437958915},"state":{"analyse":"similarNeedstriage","compares":{}},"lastBuildId":"14020","analyseResult":{"summary":{"error":"Unable to find first average","lastBuildId":14020}}} \ No newline at end of file diff --git a/dana/configs/db/Training/series/bert_1gpu_/2_overall_/training_t/hroughput_/samples_s_.json b/dana/configs/db/Training/series/bert_1gpu_/2_overall_/training_t/hroughput_/samples_s_.json deleted file mode 100644 index 5c021cb53edec3c34a870f5ca211abe21314d52d..0000000000000000000000000000000000000000 --- a/dana/configs/db/Training/series/bert_1gpu_/2_overall_/training_t/hroughput_/samples_s_.json +++ /dev/null @@ -1 +0,0 @@ 
-{"projectId":"Training","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"14021":53.87320555312033,"14022":75.63878250557683,"14023":55.66831719388616},"state":{"analyse":"similarNeedstriage","compares":{}},"lastBuildId":"14023","analyseResult":{"summary":{"error":"Unable to find first average","lastBuildId":14023}}} \ No newline at end of file diff --git a/dana/configs/db/Training/series/bert_1gpu_/2_training/_runtime_s/_.json b/dana/configs/db/Training/series/bert_1gpu_/2_training/_runtime_s/_.json deleted file mode 100644 index d8784e37d6859ee81eefdaacc6d0546e88c02ed4..0000000000000000000000000000000000000000 --- a/dana/configs/db/Training/series/bert_1gpu_/2_training/_runtime_s/_.json +++ /dev/null @@ -1 +0,0 @@ -{"projectId":"Training","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"14018":50.9871141910553,"14019":70.27028131484985,"14020":59.860506772994995,"14021":69.5253918170929,"14022":50.450583934783936,"14023":66.6864264011383},"state":{"analyse":"similarNeedstriage","compares":{}},"lastBuildId":"14023","analyseResult":{"summary":{"error":"Unable to find first average","lastBuildId":14023}}} \ No newline at end of file diff --git a/dana/configs/db/Training/series/bert_1gpu_/2_training/_throughpu/t_samples_/s_.json b/dana/configs/db/Training/series/bert_1gpu_/2_training/_throughpu/t_samples_/s_.json deleted file mode 100644 index 2d919dfb50194c0fc45b420dfa706ad7e10ff64b..0000000000000000000000000000000000000000 --- a/dana/configs/db/Training/series/bert_1gpu_/2_training/_throughpu/t_samples_/s_.json +++ /dev/null @@ -1 +0,0 @@ -{"projectId":"Training","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"14018":93.51382355419624,"14019":67.85229702776789,"14020":79.65184822242432,"14021":68.5792611215142,"14022":94.50832137371216,"14023":71.49880803807193},"state":{"analyse":"similarNeedstriage","compares":{}},"lastBuildId":"14023","analyseResult":{"summary":{"error":"Unable to find first average","lastBuildId":14023}}} \ No newline at end of file diff --git a/dana/configs/db/Training/series/bert_1gpu_/2_warmup_r/untime_s_.json b/dana/configs/db/Training/series/bert_1gpu_/2_warmup_r/untime_s_.json deleted file mode 100644 index 2062aceecf525592ff4360a4ea76910b21e1342f..0000000000000000000000000000000000000000 --- a/dana/configs/db/Training/series/bert_1gpu_/2_warmup_r/untime_s_.json +++ /dev/null @@ -1 +0,0 @@ -{"projectId":"Training","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"14018":14.335822343826294,"14019":18.946444749832153,"14020":16.510013103485107,"14021":18.978710889816284,"14022":12.58586072921753,"14023":18.963727474212646},"state":{"analyse":"similarNeedstriage","compares":{}},"lastBuildId":"14023","analyseResult":{"summary":{"error":"Unable to find first average","lastBuildId":14023}}} \ No newline at end of file diff --git a/dana/configs/db/Training/series/bert_1gpu_/2_warmup_t/hroughput_/samples_s_.json b/dana/configs/db/Training/series/bert_1gpu_/2_warmup_t/hroughput_/samples_s_.json deleted file mode 100644 index ba555e6ef552d285f1c7edbdb35517a437e497ef..0000000000000000000000000000000000000000 --- a/dana/configs/db/Training/series/bert_1gpu_/2_warmup_t/hroughput_/samples_s_.json +++ /dev/null @@ -1 +0,0 @@ 
-{"projectId":"Training","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"14018":89.28682075578529,"14019":67.55884900312707,"14020":77.52870890997683,"14021":67.44399066044208,"14022":101.70142730314309,"14023":67.49727877816089},"state":{"analyse":"similarNeedstriage","compares":{}},"lastBuildId":"14023","analyseResult":{"summary":{"error":"Unable to find first average","lastBuildId":14023}}} \ No newline at end of file diff --git a/dana/configs/db/Training/series/bert_1gpu_/3_overall_/training_r/untime_s_.json b/dana/configs/db/Training/series/bert_1gpu_/3_overall_/training_r/untime_s_.json deleted file mode 100644 index 2330f870115d313e426693e07956dd2892ef4631..0000000000000000000000000000000000000000 --- a/dana/configs/db/Training/series/bert_1gpu_/3_overall_/training_r/untime_s_.json +++ /dev/null @@ -1 +0,0 @@ -{"projectId":"Training","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"14018":33.440120220184326,"14019":44.65711045265198,"14020":35.8595232963562,"14021":44.754762411117554,"14022":30.102943181991577,"14023":44.58680176734924},"state":{"analyse":"similarNeedstriage","compares":{}},"lastBuildId":"14023","analyseResult":{"summary":{"error":"Unable to find first average","lastBuildId":14023}}} \ No newline at end of file diff --git a/dana/configs/db/Training/series/bert_1gpu_/3_overall_/training_t/hroughput_/samles_s_.json b/dana/configs/db/Training/series/bert_1gpu_/3_overall_/training_t/hroughput_/samles_s_.json deleted file mode 100644 index d719cb1af6e8f95bf5836a885868f109f4764ca4..0000000000000000000000000000000000000000 --- a/dana/configs/db/Training/series/bert_1gpu_/3_overall_/training_t/hroughput_/samles_s_.json +++ /dev/null @@ -1 +0,0 @@ -{"projectId":"Training","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"14018":142.58321945631207,"14019":106.76911138384796,"14020":132.9632845533251},"state":{"analyse":"similarNeedstriage","compares":{}},"lastBuildId":"14020","analyseResult":{"summary":{"error":"Unable to find first average","lastBuildId":14020}}} \ No newline at end of file diff --git a/dana/configs/db/Training/series/bert_1gpu_/3_overall_/training_t/hroughput_/samples_s_.json b/dana/configs/db/Training/series/bert_1gpu_/3_overall_/training_t/hroughput_/samples_s_.json deleted file mode 100644 index cc04f2af2fa28b40e639451d4a1caf9f1d03e1b8..0000000000000000000000000000000000000000 --- a/dana/configs/db/Training/series/bert_1gpu_/3_overall_/training_t/hroughput_/samples_s_.json +++ /dev/null @@ -1 +0,0 @@ -{"projectId":"Training","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"14021":106.53614818018961,"14022":158.38982823620884,"14023":106.9374750151196},"state":{"analyse":"similarNeedstriage","compares":{}},"lastBuildId":"14023","analyseResult":{"summary":{"error":"Unable to find first average","lastBuildId":14023}}} \ No newline at end of file diff --git a/dana/configs/db/Training/series/bert_1gpu_/3_training/_runtime_s/_.json b/dana/configs/db/Training/series/bert_1gpu_/3_training/_runtime_s/_.json deleted file mode 100644 index 2b822cff372ff70f740a911fd5915c5bc6efb3f7..0000000000000000000000000000000000000000 --- a/dana/configs/db/Training/series/bert_1gpu_/3_training/_runtime_s/_.json +++ /dev/null @@ -1 +0,0 @@ 
-{"projectId":"Training","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"14018":26.38365411758423,"14019":34.85708165168762,"14020":27.590984582901,"14021":34.9902446269989,"14022":23.675896406173706,"14023":34.83749222755432},"state":{"analyse":"similarNeedstriage","compares":{}},"lastBuildId":"14023","analyseResult":{"summary":{"error":"Unable to find first average","lastBuildId":14023}}} \ No newline at end of file diff --git a/dana/configs/db/Training/series/bert_1gpu_/3_training/_throughpu/t_samples_/s_.json b/dana/configs/db/Training/series/bert_1gpu_/3_training/_throughpu/t_samples_/s_.json deleted file mode 100644 index c778d8abe1b655427031616487bbbe5976b0ea38..0000000000000000000000000000000000000000 --- a/dana/configs/db/Training/series/bert_1gpu_/3_training/_throughpu/t_samples_/s_.json +++ /dev/null @@ -1 +0,0 @@ -{"projectId":"Training","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"14018":180.71795433454437,"14019":136.78712542962285,"14020":172.81007082852994,"14021":136.2665523155832,"14022":201.38625031138,"14023":136.8640420170314},"state":{"analyse":"similarNeedstriage","compares":{}},"lastBuildId":"14023","analyseResult":{"summary":{"error":"Unable to find first average","lastBuildId":14023}}} \ No newline at end of file diff --git a/dana/configs/db/Training/series/bert_1gpu_/3_warmup_r/untime_s_.json b/dana/configs/db/Training/series/bert_1gpu_/3_warmup_r/untime_s_.json deleted file mode 100644 index 703b198618619741efee013c41700d6509f57573..0000000000000000000000000000000000000000 --- a/dana/configs/db/Training/series/bert_1gpu_/3_warmup_r/untime_s_.json +++ /dev/null @@ -1 +0,0 @@ -{"projectId":"Training","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"14018":7.056463956832886,"14019":9.80002760887146,"14020":8.268535375595093,"14021":9.764516115188599,"14022":6.427045106887817,"14023":9.749308347702026},"state":{"analyse":"similarNeedstriage","compares":{}},"lastBuildId":"14023","analyseResult":{"summary":{"error":"Unable to find first average","lastBuildId":14023}}} \ No newline at end of file diff --git a/dana/configs/db/Training/series/bert_1gpu_/3_warmup_t/hroughput_/samples_s_.json b/dana/configs/db/Training/series/bert_1gpu_/3_warmup_t/hroughput_/samples_s_.json deleted file mode 100644 index 2508e7184636866ceccb91051bfa6111095c7825..0000000000000000000000000000000000000000 --- a/dana/configs/db/Training/series/bert_1gpu_/3_warmup_t/hroughput_/samples_s_.json +++ /dev/null @@ -1 +0,0 @@ -{"projectId":"Training","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"14018":181.39396839978977,"14019":130.61187693402843,"14020":154.80371575575165,"14021":131.08688489017638,"14022":199.15839685460327,"14023":131.29136492043602},"state":{"analyse":"similarNeedstriage","compares":{}},"lastBuildId":"14023","analyseResult":{"summary":{"error":"Unable to find first average","lastBuildId":14023}}} \ No newline at end of file diff --git a/dana/configs/db/admin/compares.json b/dana/configs/db/admin/compares.json deleted file mode 100644 index 5898963fecca7a0a90bc588c2366c4a831fede26..0000000000000000000000000000000000000000 --- a/dana/configs/db/admin/compares.json +++ /dev/null @@ -1 +0,0 @@ -{"Compare_with_build_1000":{"compareId":"Compare_with_build_1000","description":"Build 
1000","projectId":"Test","useAverage":true,"compareWith":{"projectId":"Test","useAverage":false}},"Compare_with_build_1000_avg":{"compareId":"Compare_with_build_1000_avg","description":"Average build 1000","projectId":"Test","useAverage":true,"compareWith":{"projectId":"Test","useAverage":true}},"Compare_with_build_1000_filter_1":{"compareId":"Compare_with_build_1000_filter_1","description":"Build 1000 filter 1","projectId":"Test","filter":"1","useAverage":true,"compareWith":{"projectId":"Test","buildId":1000,"useAverage":true}},"Compare_with_build_1000_replace_2_1":{"compareId":"Compare_with_build_1000_replace_2_1","description":"Build 1000 replace 2 -> 1","projectId":"Test","filter":"2","useAverage":true,"compareWith":{"projectId":"Test","replace":{"substr":"2","newSubStr":"1"},"buildId":1000,"useAverage":true}}} diff --git a/dana/configs/db/admin/globalStats.json b/dana/configs/db/admin/globalStats.json deleted file mode 100644 index f8d852159a3e9a3745a5d45fff97eaab4df8fc75..0000000000000000000000000000000000000000 --- a/dana/configs/db/admin/globalStats.json +++ /dev/null @@ -1 +0,0 @@ -{"numSamples":3602,"numSeries":187,"projects":{"Inference":{"numSamples":3458,"numSeries":159},"Training":{"numSamples":144,"numSeries":28}}} \ No newline at end of file diff --git a/dana/configs/db/admin/projects.json b/dana/configs/db/admin/projects.json deleted file mode 100644 index 5ecb3f5697f2f3e23a570717ee32296245a7df10..0000000000000000000000000000000000000000 --- a/dana/configs/db/admin/projects.json +++ /dev/null @@ -1 +0,0 @@ -{"Test":{"description":"This is a test project","infos":"This is a test project","users":"myadress.com"},"Training":{"description":"Benchmarks related to training","users":"","useBugTracker":false},"Inference":{"description":"Benchmarks related to inference","users":"","useBugTracker":false}} \ No newline at end of file diff --git a/raw_results/2023-08-17_08:48:38_e50c9253f3a38d9db56c02d3d8d04e2f20070de8/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-17_08:48:38_e50c9253f3a38d9db56c02d3d8d04e2f20070de8/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index 8555d6744dd0e5881f14d8db6ba7f34fd0488b3b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_08:48:38_e50c9253f3a38d9db56c02d3d8d04e2f20070de8/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: 
hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_08:48:38_e50c9253f3a38d9db56c02d3d8d04e2f20070de8/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-17_08:48:38_e50c9253f3a38d9db56c02d3d8d04e2f20070de8/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 119693b0707dc8eca9e457be47d6823b2cb8cc03..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_08:48:38_e50c9253f3a38d9db56c02d3d8d04e2f20070de8/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-17_08:48:38_e50c9253f3a38d9db56c02d3d8d04e2f20070de8/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-17_08:48:38_e50c9253f3a38d9db56c02d3d8d04e2f20070de8/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-17_08:48:38_e50c9253f3a38d9db56c02d3d8d04e2f20070de8/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_08:48:38_e50c9253f3a38d9db56c02d3d8d04e2f20070de8/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-17_08:48:38_e50c9253f3a38d9db56c02d3d8d04e2f20070de8/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-17_08:48:38_e50c9253f3a38d9db56c02d3d8d04e2f20070de8/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 8748d22fe3c2d5758d4d82aa74516bd5a24d74c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_08:48:38_e50c9253f3a38d9db56c02d3d8d04e2f20070de8/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_08:48:38_e50c9253f3a38d9db56c02d3d8d04e2f20070de8/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-17_08:48:38_e50c9253f3a38d9db56c02d3d8d04e2f20070de8/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 922dcca434bbceeab53290b2cd3fb85b7e5039ec..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_08:48:38_e50c9253f3a38d9db56c02d3d8d04e2f20070de8/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.690048,0.00331,302.0 diff --git a/raw_results/2023-08-17_08:48:38_e50c9253f3a38d9db56c02d3d8d04e2f20070de8/pytorch_bert_inference/0/main.log b/raw_results/2023-08-17_08:48:38_e50c9253f3a38d9db56c02d3d8d04e2f20070de8/pytorch_bert_inference/0/main.log deleted file mode 100644 index 6a68fdc92049769fc74625ffde363d1faa7b9550..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_08:48:38_e50c9253f3a38d9db56c02d3d8d04e2f20070de8/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-17 12:24:06,521][benchmark][INFO] - Configuring inference benchmark -[2023-08-17 12:24:06,521][benchmark][INFO] - + Setting seed(42) -[2023-08-17 12:24:07,769][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-17 12:24:07,770][backend][INFO] - Configuring pytorch backend -[2023-08-17 12:24:07,770][backend][INFO] - + Checking initial device isolation -[2023-08-17 12:24:07,770][backend][INFO] - + Checking contineous device isolation -[2023-08-17 12:24:07,770][pytorch][INFO] - + Disabling gradients -[2023-08-17 12:24:07,770][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-17 12:24:08,393][pytorch][INFO] - + Turning on eval mode -[2023-08-17 12:24:08,394][inference][INFO] - Running inference benchmark -[2023-08-17 12:24:08,513][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-17 12:24:08,515][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-17 12:24:08,572][inference][INFO] - + Forward pass peak memory: 466.690048 (MB) -[2023-08-17 12:24:08,573][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-17 12:24:08,575][inference][INFO] - + Warming up the forward pass -[2023-08-17 12:24:08,611][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-17 12:24:13,663][inference][INFO] - + Forward pass latency: 3.31e-03 (s) -[2023-08-17 12:24:13,665][inference][INFO] - + Forward pass throughput: 302.00 (samples/s) -[2023-08-17 12:24:13,665][inference][INFO] - Saving inference results -[2023-08-17 12:24:13,677][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-17_08:48:38_e50c9253f3a38d9db56c02d3d8d04e2f20070de8/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-17_08:48:38_e50c9253f3a38d9db56c02d3d8d04e2f20070de8/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index fc02352dc97d220d55fe04bc4edef4200110f22a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_08:48:38_e50c9253f3a38d9db56c02d3d8d04e2f20070de8/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_08:48:38_e50c9253f3a38d9db56c02d3d8d04e2f20070de8/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-17_08:48:38_e50c9253f3a38d9db56c02d3d8d04e2f20070de8/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 33219817917592338f0fd45f42aa8cbe0cfb2cf7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_08:48:38_e50c9253f3a38d9db56c02d3d8d04e2f20070de8/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-17_08:48:38_e50c9253f3a38d9db56c02d3d8d04e2f20070de8/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-17_08:48:38_e50c9253f3a38d9db56c02d3d8d04e2f20070de8/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-08-17_08:48:38_e50c9253f3a38d9db56c02d3d8d04e2f20070de8/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_08:48:38_e50c9253f3a38d9db56c02d3d8d04e2f20070de8/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-17_08:48:38_e50c9253f3a38d9db56c02d3d8d04e2f20070de8/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-17_08:48:38_e50c9253f3a38d9db56c02d3d8d04e2f20070de8/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 819b1876e26fdd758df23050ecd02bd8d430c90c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_08:48:38_e50c9253f3a38d9db56c02d3d8d04e2f20070de8/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_08:48:38_e50c9253f3a38d9db56c02d3d8d04e2f20070de8/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-17_08:48:38_e50c9253f3a38d9db56c02d3d8d04e2f20070de8/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index a1e4e3b0646a9c35e2448f9b30e070c77303d5ad..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_08:48:38_e50c9253f3a38d9db56c02d3d8d04e2f20070de8/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.69356799999997,0.00343,1170.0 diff --git a/raw_results/2023-08-17_08:48:38_e50c9253f3a38d9db56c02d3d8d04e2f20070de8/pytorch_bert_inference/1/main.log b/raw_results/2023-08-17_08:48:38_e50c9253f3a38d9db56c02d3d8d04e2f20070de8/pytorch_bert_inference/1/main.log deleted file mode 100644 index 41fcf66f832cccff0ec96a1c83f1664fe1ff958f..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-17_08:48:38_e50c9253f3a38d9db56c02d3d8d04e2f20070de8/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-17 12:24:14,067][benchmark][INFO] - Configuring inference benchmark -[2023-08-17 12:24:14,067][benchmark][INFO] - + Setting seed(42) -[2023-08-17 12:24:14,507][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-17 12:24:14,508][backend][INFO] - Configuring pytorch backend -[2023-08-17 12:24:14,508][backend][INFO] - + Checking initial device isolation -[2023-08-17 12:24:14,508][backend][INFO] - + Checking contineous device isolation -[2023-08-17 12:24:14,508][pytorch][INFO] - + Disabling gradients -[2023-08-17 12:24:14,508][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-17 12:24:14,624][pytorch][INFO] - + Turning on eval mode -[2023-08-17 12:24:14,625][inference][INFO] - Running inference benchmark -[2023-08-17 12:24:14,749][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-17 12:24:14,750][inference][INFO] - + Tracking forward pass peak memory -[2023-08-17 12:24:14,790][inference][INFO] - + Forward pass peak memory: 467.69356799999997 (MB) -[2023-08-17 12:24:14,791][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-17 12:24:14,793][inference][INFO] - + Warming up the forward pass -[2023-08-17 12:24:14,827][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-17 12:24:19,885][inference][INFO] - + Forward pass latency: 3.43e-03 (s) -[2023-08-17 12:24:19,886][inference][INFO] - + Forward pass throughput: 1170.00 (samples/s) -[2023-08-17 12:24:19,887][inference][INFO] - Saving inference results -[2023-08-17 12:24:19,894][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-17_08:48:38_e50c9253f3a38d9db56c02d3d8d04e2f20070de8/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-17_08:48:38_e50c9253f3a38d9db56c02d3d8d04e2f20070de8/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 39258ef78f16622f3cbf970c22d0b92229e7cc16..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_08:48:38_e50c9253f3a38d9db56c02d3d8d04e2f20070de8/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference 
-model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_08:48:38_e50c9253f3a38d9db56c02d3d8d04e2f20070de8/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-17_08:48:38_e50c9253f3a38d9db56c02d3d8d04e2f20070de8/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 86e3193cb94155c8645525b637c45f2eb5cc0d5d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_08:48:38_e50c9253f3a38d9db56c02d3d8d04e2f20070de8/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-17_08:48:38_e50c9253f3a38d9db56c02d3d8d04e2f20070de8/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-17_08:48:38_e50c9253f3a38d9db56c02d3d8d04e2f20070de8/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-17_08:48:38_e50c9253f3a38d9db56c02d3d8d04e2f20070de8/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_08:48:38_e50c9253f3a38d9db56c02d3d8d04e2f20070de8/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-17_08:48:38_e50c9253f3a38d9db56c02d3d8d04e2f20070de8/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-17_08:48:38_e50c9253f3a38d9db56c02d3d8d04e2f20070de8/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index cb6bbe7e2787ba735853eaeafaa72ec86761d726..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_08:48:38_e50c9253f3a38d9db56c02d3d8d04e2f20070de8/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_08:48:38_e50c9253f3a38d9db56c02d3d8d04e2f20070de8/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-17_08:48:38_e50c9253f3a38d9db56c02d3d8d04e2f20070de8/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 12295976bcd084f241461ecff3b5f810b6094163..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_08:48:38_e50c9253f3a38d9db56c02d3d8d04e2f20070de8/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.082112,0.00381,262.0,0.502,199.0 diff --git a/raw_results/2023-08-17_08:48:38_e50c9253f3a38d9db56c02d3d8d04e2f20070de8/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-17_08:48:38_e50c9253f3a38d9db56c02d3d8d04e2f20070de8/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 2ffd601e179e4b99d0d67f7229bfa23ebd6c60cb..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_08:48:38_e50c9253f3a38d9db56c02d3d8d04e2f20070de8/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-17 12:24:25,259][benchmark][INFO] - Configuring inference benchmark -[2023-08-17 12:24:25,260][benchmark][INFO] - + Setting seed(42) -[2023-08-17 12:24:26,968][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-17 12:24:26,968][backend][INFO] - Configuring pytorch backend -[2023-08-17 12:24:26,969][backend][INFO] - + Checking initial device isolation -[2023-08-17 12:24:26,969][backend][INFO] - + Checking contineous device isolation -[2023-08-17 12:24:26,969][pytorch][INFO] - + Disabling gradients -[2023-08-17 12:24:26,969][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-17 12:24:27,604][pytorch][INFO] - + Turning on eval mode -[2023-08-17 12:24:27,605][inference][INFO] - Running inference benchmark -[2023-08-17 12:24:27,800][inference][INFO] - + Tracking forward pass peak memory -[2023-08-17 12:24:27,843][inference][INFO] - + Forward pass peak memory: 469.082112 (MB) -[2023-08-17 12:24:27,845][inference][INFO] - + Warming up the forward pass 
-[2023-08-17 12:24:27,880][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-17 12:24:32,931][inference][INFO] - + Forward pass latency: 3.81e-03 (s) -[2023-08-17 12:24:32,933][inference][INFO] - + Forward pass throughput: 262.00 (samples/s) -[2023-08-17 12:24:32,933][inference][INFO] - + Warming up the generation pass -[2023-08-17 12:24:33,528][inference][INFO] - + Tracking generation latency and throughput -[2023-08-17 12:24:38,549][inference][INFO] - + Generation pass latency: 5.02e-01 (s) -[2023-08-17 12:24:38,551][inference][INFO] - + Generation pass throughput: 199.00 (tokens/s) -[2023-08-17 12:24:38,551][inference][INFO] - Saving inference results -[2023-08-17 12:24:38,563][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-17_09:23:16_8992589dd6fcfdfc5f1435a4a9a92da501dd5ab6/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-17_09:23:16_8992589dd6fcfdfc5f1435a4a9a92da501dd5ab6/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index 8555d6744dd0e5881f14d8db6ba7f34fd0488b3b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_09:23:16_8992589dd6fcfdfc5f1435a4a9a92da501dd5ab6/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_09:23:16_8992589dd6fcfdfc5f1435a4a9a92da501dd5ab6/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-17_09:23:16_8992589dd6fcfdfc5f1435a4a9a92da501dd5ab6/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 2de5cb029f09f9a024323a1632f3436ff71a6a1a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_09:23:16_8992589dd6fcfdfc5f1435a4a9a92da501dd5ab6/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-17_09:23:16_8992589dd6fcfdfc5f1435a4a9a92da501dd5ab6/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-17_09:23:16_8992589dd6fcfdfc5f1435a4a9a92da501dd5ab6/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-17_09:23:16_8992589dd6fcfdfc5f1435a4a9a92da501dd5ab6/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_09:23:16_8992589dd6fcfdfc5f1435a4a9a92da501dd5ab6/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-17_09:23:16_8992589dd6fcfdfc5f1435a4a9a92da501dd5ab6/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-17_09:23:16_8992589dd6fcfdfc5f1435a4a9a92da501dd5ab6/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 8748d22fe3c2d5758d4d82aa74516bd5a24d74c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_09:23:16_8992589dd6fcfdfc5f1435a4a9a92da501dd5ab6/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_09:23:16_8992589dd6fcfdfc5f1435a4a9a92da501dd5ab6/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-17_09:23:16_8992589dd6fcfdfc5f1435a4a9a92da501dd5ab6/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index c64ad8d8a5885c663dd8e72bb08718f16c5ec0e0..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_09:23:16_8992589dd6fcfdfc5f1435a4a9a92da501dd5ab6/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.267584,0.00456,219.0 diff --git a/raw_results/2023-08-17_09:23:16_8992589dd6fcfdfc5f1435a4a9a92da501dd5ab6/pytorch_bert_inference/0/main.log b/raw_results/2023-08-17_09:23:16_8992589dd6fcfdfc5f1435a4a9a92da501dd5ab6/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
807004312acc32c7c5333747bb98a783b9e9848f..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_09:23:16_8992589dd6fcfdfc5f1435a4a9a92da501dd5ab6/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-17 12:25:40,385][benchmark][INFO] - Configuring inference benchmark -[2023-08-17 12:25:40,386][benchmark][INFO] - + Setting seed(42) -[2023-08-17 12:25:41,641][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-17 12:25:41,641][backend][INFO] - Configuring pytorch backend -[2023-08-17 12:25:41,641][backend][INFO] - + Checking initial device isolation -[2023-08-17 12:25:41,642][backend][INFO] - + Checking contineous device isolation -[2023-08-17 12:25:41,642][pytorch][INFO] - + Disabling gradients -[2023-08-17 12:25:41,642][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-17 12:25:42,266][pytorch][INFO] - + Turning on eval mode -[2023-08-17 12:25:42,267][inference][INFO] - Running inference benchmark -[2023-08-17 12:25:42,387][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-17 12:25:42,389][inference][INFO] - + Tracking forward pass peak memory -[2023-08-17 12:25:42,448][inference][INFO] - + Forward pass peak memory: 467.267584 (MB) -[2023-08-17 12:25:42,449][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-17 12:25:42,452][inference][INFO] - + Warming up the forward pass -[2023-08-17 12:25:42,495][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-17 12:25:47,540][inference][INFO] - + Forward pass latency: 4.56e-03 (s) -[2023-08-17 12:25:47,543][inference][INFO] - + Forward pass throughput: 219.00 (samples/s) -[2023-08-17 12:25:47,543][inference][INFO] - Saving inference results -[2023-08-17 12:25:47,554][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-17_09:23:16_8992589dd6fcfdfc5f1435a4a9a92da501dd5ab6/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-17_09:23:16_8992589dd6fcfdfc5f1435a4a9a92da501dd5ab6/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index fc02352dc97d220d55fe04bc4edef4200110f22a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_09:23:16_8992589dd6fcfdfc5f1435a4a9a92da501dd5ab6/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_09:23:16_8992589dd6fcfdfc5f1435a4a9a92da501dd5ab6/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-17_09:23:16_8992589dd6fcfdfc5f1435a4a9a92da501dd5ab6/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 8e92f5726432b293b4a7a5d620f79c0089eec983..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_09:23:16_8992589dd6fcfdfc5f1435a4a9a92da501dd5ab6/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-17_09:23:16_8992589dd6fcfdfc5f1435a4a9a92da501dd5ab6/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-17_09:23:16_8992589dd6fcfdfc5f1435a4a9a92da501dd5ab6/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-17_09:23:16_8992589dd6fcfdfc5f1435a4a9a92da501dd5ab6/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_09:23:16_8992589dd6fcfdfc5f1435a4a9a92da501dd5ab6/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-17_09:23:16_8992589dd6fcfdfc5f1435a4a9a92da501dd5ab6/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-17_09:23:16_8992589dd6fcfdfc5f1435a4a9a92da501dd5ab6/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 819b1876e26fdd758df23050ecd02bd8d430c90c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_09:23:16_8992589dd6fcfdfc5f1435a4a9a92da501dd5ab6/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_09:23:16_8992589dd6fcfdfc5f1435a4a9a92da501dd5ab6/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-17_09:23:16_8992589dd6fcfdfc5f1435a4a9a92da501dd5ab6/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 05d32c99dc2f45807a07bc0989f45527b0a564d1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_09:23:16_8992589dd6fcfdfc5f1435a4a9a92da501dd5ab6/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.283392,0.00376,1060.0 diff --git a/raw_results/2023-08-17_09:23:16_8992589dd6fcfdfc5f1435a4a9a92da501dd5ab6/pytorch_bert_inference/1/main.log b/raw_results/2023-08-17_09:23:16_8992589dd6fcfdfc5f1435a4a9a92da501dd5ab6/pytorch_bert_inference/1/main.log deleted file mode 100644 index 8b71def35b714d3ff098bf2d1b15f1855c0211ec..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_09:23:16_8992589dd6fcfdfc5f1435a4a9a92da501dd5ab6/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-17 12:25:47,954][benchmark][INFO] - Configuring inference benchmark -[2023-08-17 12:25:47,954][benchmark][INFO] - + Setting seed(42) -[2023-08-17 12:25:48,393][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-17 12:25:48,393][backend][INFO] - Configuring pytorch backend -[2023-08-17 12:25:48,394][backend][INFO] - + Checking initial device isolation -[2023-08-17 12:25:48,394][backend][INFO] - + Checking contineous device isolation -[2023-08-17 12:25:48,394][pytorch][INFO] - + Disabling gradients -[2023-08-17 12:25:48,394][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-17 12:25:48,509][pytorch][INFO] - + Turning on eval mode -[2023-08-17 12:25:48,509][inference][INFO] - Running inference benchmark -[2023-08-17 12:25:48,629][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-17 12:25:48,631][inference][INFO] - + Tracking forward pass 
peak memory -[2023-08-17 12:25:48,671][inference][INFO] - + Forward pass peak memory: 468.283392 (MB) -[2023-08-17 12:25:48,672][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-17 12:25:48,673][inference][INFO] - + Warming up the forward pass -[2023-08-17 12:25:48,725][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-17 12:25:53,771][inference][INFO] - + Forward pass latency: 3.76e-03 (s) -[2023-08-17 12:25:53,772][inference][INFO] - + Forward pass throughput: 1060.00 (samples/s) -[2023-08-17 12:25:53,772][inference][INFO] - Saving inference results -[2023-08-17 12:25:53,780][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-17_09:23:16_8992589dd6fcfdfc5f1435a4a9a92da501dd5ab6/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-17_09:23:16_8992589dd6fcfdfc5f1435a4a9a92da501dd5ab6/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 39258ef78f16622f3cbf970c22d0b92229e7cc16..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_09:23:16_8992589dd6fcfdfc5f1435a4a9a92da501dd5ab6/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_09:23:16_8992589dd6fcfdfc5f1435a4a9a92da501dd5ab6/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-17_09:23:16_8992589dd6fcfdfc5f1435a4a9a92da501dd5ab6/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 86e95a48397ad07c1d8c37d23d8edf39b9b8b13b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_09:23:16_8992589dd6fcfdfc5f1435a4a9a92da501dd5ab6/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-17_09:23:16_8992589dd6fcfdfc5f1435a4a9a92da501dd5ab6/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-17_09:23:16_8992589dd6fcfdfc5f1435a4a9a92da501dd5ab6/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-17_09:23:16_8992589dd6fcfdfc5f1435a4a9a92da501dd5ab6/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_09:23:16_8992589dd6fcfdfc5f1435a4a9a92da501dd5ab6/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-17_09:23:16_8992589dd6fcfdfc5f1435a4a9a92da501dd5ab6/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-17_09:23:16_8992589dd6fcfdfc5f1435a4a9a92da501dd5ab6/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index cb6bbe7e2787ba735853eaeafaa72ec86761d726..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_09:23:16_8992589dd6fcfdfc5f1435a4a9a92da501dd5ab6/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_09:23:16_8992589dd6fcfdfc5f1435a4a9a92da501dd5ab6/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-17_09:23:16_8992589dd6fcfdfc5f1435a4a9a92da501dd5ab6/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index decf3768bd86ff271226a7777c0755dea2b754bd..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_09:23:16_8992589dd6fcfdfc5f1435a4a9a92da501dd5ab6/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,468.676608,0.0037,270.0,0.586,171.0 diff --git a/raw_results/2023-08-17_09:23:16_8992589dd6fcfdfc5f1435a4a9a92da501dd5ab6/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-17_09:23:16_8992589dd6fcfdfc5f1435a4a9a92da501dd5ab6/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 595c7ec7c12c31e368e9f3823c2b190823105444..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-17_09:23:16_8992589dd6fcfdfc5f1435a4a9a92da501dd5ab6/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-17 12:25:59,318][benchmark][INFO] - Configuring inference benchmark -[2023-08-17 12:25:59,319][benchmark][INFO] - + Setting seed(42) -[2023-08-17 12:26:00,898][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-17 12:26:00,899][backend][INFO] - Configuring pytorch backend -[2023-08-17 12:26:00,899][backend][INFO] - + Checking initial device isolation -[2023-08-17 12:26:00,899][backend][INFO] - + Checking contineous device isolation -[2023-08-17 12:26:00,899][pytorch][INFO] - + Disabling gradients -[2023-08-17 12:26:00,900][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-17 12:26:01,568][pytorch][INFO] - + Turning on eval mode -[2023-08-17 12:26:01,569][inference][INFO] - Running inference benchmark -[2023-08-17 12:26:01,760][inference][INFO] - + Tracking forward pass peak memory -[2023-08-17 12:26:01,808][inference][INFO] - + Forward pass peak memory: 468.676608 (MB) -[2023-08-17 12:26:01,810][inference][INFO] - + Warming up the forward pass -[2023-08-17 12:26:01,849][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-17 12:26:06,927][inference][INFO] - + Forward pass latency: 3.70e-03 (s) -[2023-08-17 12:26:06,929][inference][INFO] - + Forward pass throughput: 270.00 (samples/s) -[2023-08-17 12:26:06,929][inference][INFO] - + Warming up the generation pass -[2023-08-17 12:26:07,533][inference][INFO] - + Tracking generation latency and throughput -[2023-08-17 12:26:13,393][inference][INFO] - + Generation pass latency: 5.86e-01 (s) -[2023-08-17 12:26:13,394][inference][INFO] - + Generation pass throughput: 171.00 (tokens/s) -[2023-08-17 12:26:13,394][inference][INFO] - Saving inference results -[2023-08-17 12:26:13,406][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-17_10:08:11_e7e9261a202dd5623f488f1cb05007e88629f275/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-17_10:08:11_e7e9261a202dd5623f488f1cb05007e88629f275/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index 8555d6744dd0e5881f14d8db6ba7f34fd0488b3b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_10:08:11_e7e9261a202dd5623f488f1cb05007e88629f275/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_10:08:11_e7e9261a202dd5623f488f1cb05007e88629f275/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-17_10:08:11_e7e9261a202dd5623f488f1cb05007e88629f275/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 92d0c444f1caf9dba72c6225011940ce77550c5d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_10:08:11_e7e9261a202dd5623f488f1cb05007e88629f275/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-17_10:08:11_e7e9261a202dd5623f488f1cb05007e88629f275/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-17_10:08:11_e7e9261a202dd5623f488f1cb05007e88629f275/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-17_10:08:11_e7e9261a202dd5623f488f1cb05007e88629f275/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_10:08:11_e7e9261a202dd5623f488f1cb05007e88629f275/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-17_10:08:11_e7e9261a202dd5623f488f1cb05007e88629f275/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-17_10:08:11_e7e9261a202dd5623f488f1cb05007e88629f275/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 8748d22fe3c2d5758d4d82aa74516bd5a24d74c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_10:08:11_e7e9261a202dd5623f488f1cb05007e88629f275/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_10:08:11_e7e9261a202dd5623f488f1cb05007e88629f275/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-17_10:08:11_e7e9261a202dd5623f488f1cb05007e88629f275/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 9fc6bf70fb7489ac20265ee53520010c528f23ae..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_10:08:11_e7e9261a202dd5623f488f1cb05007e88629f275/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.673664,0.00394,254.0 diff --git a/raw_results/2023-08-17_10:08:11_e7e9261a202dd5623f488f1cb05007e88629f275/pytorch_bert_inference/0/main.log b/raw_results/2023-08-17_10:08:11_e7e9261a202dd5623f488f1cb05007e88629f275/pytorch_bert_inference/0/main.log deleted file mode 100644 index f8513f38468585412565462de0d485a1fb71b4f2..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_10:08:11_e7e9261a202dd5623f488f1cb05007e88629f275/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-17 12:27:15,507][benchmark][INFO] - Configuring inference benchmark -[2023-08-17 12:27:15,508][benchmark][INFO] - + Setting seed(42) -[2023-08-17 12:27:16,877][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-17 12:27:16,878][backend][INFO] - Configuring pytorch backend -[2023-08-17 12:27:16,878][backend][INFO] - + Checking initial device isolation -[2023-08-17 12:27:16,878][backend][INFO] - + Checking contineous device isolation -[2023-08-17 12:27:16,879][pytorch][INFO] - + Disabling gradients -[2023-08-17 12:27:16,879][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-17 12:27:17,611][pytorch][INFO] - + Turning on eval mode -[2023-08-17 12:27:17,611][inference][INFO] - Running inference benchmark -[2023-08-17 12:27:17,731][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-17 12:27:17,733][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-17 12:27:17,807][inference][INFO] - + Forward pass peak memory: 466.673664 (MB) -[2023-08-17 12:27:17,809][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-17 12:27:17,811][inference][INFO] - + Warming up the forward pass -[2023-08-17 12:27:17,851][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-17 12:27:22,920][inference][INFO] - + Forward pass latency: 3.94e-03 (s) -[2023-08-17 12:27:22,921][inference][INFO] - + Forward pass throughput: 254.00 (samples/s) -[2023-08-17 12:27:22,922][inference][INFO] - Saving inference results -[2023-08-17 12:27:22,931][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-17_10:08:11_e7e9261a202dd5623f488f1cb05007e88629f275/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-17_10:08:11_e7e9261a202dd5623f488f1cb05007e88629f275/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index fc02352dc97d220d55fe04bc4edef4200110f22a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_10:08:11_e7e9261a202dd5623f488f1cb05007e88629f275/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_10:08:11_e7e9261a202dd5623f488f1cb05007e88629f275/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-17_10:08:11_e7e9261a202dd5623f488f1cb05007e88629f275/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 509e43da1a5993645f4cbb72700809dbd2908d7c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_10:08:11_e7e9261a202dd5623f488f1cb05007e88629f275/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-17_10:08:11_e7e9261a202dd5623f488f1cb05007e88629f275/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-17_10:08:11_e7e9261a202dd5623f488f1cb05007e88629f275/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-08-17_10:08:11_e7e9261a202dd5623f488f1cb05007e88629f275/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_10:08:11_e7e9261a202dd5623f488f1cb05007e88629f275/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-17_10:08:11_e7e9261a202dd5623f488f1cb05007e88629f275/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-17_10:08:11_e7e9261a202dd5623f488f1cb05007e88629f275/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 819b1876e26fdd758df23050ecd02bd8d430c90c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_10:08:11_e7e9261a202dd5623f488f1cb05007e88629f275/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_10:08:11_e7e9261a202dd5623f488f1cb05007e88629f275/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-17_10:08:11_e7e9261a202dd5623f488f1cb05007e88629f275/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 91a26cf58044200b762de0eb17717431e6c0f2ed..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_10:08:11_e7e9261a202dd5623f488f1cb05007e88629f275/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.73862399999996,0.00343,1170.0 diff --git a/raw_results/2023-08-17_10:08:11_e7e9261a202dd5623f488f1cb05007e88629f275/pytorch_bert_inference/1/main.log b/raw_results/2023-08-17_10:08:11_e7e9261a202dd5623f488f1cb05007e88629f275/pytorch_bert_inference/1/main.log deleted file mode 100644 index 1338ff8f13558c3ebee6d3441f174cddf6fb5ef9..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-17_10:08:11_e7e9261a202dd5623f488f1cb05007e88629f275/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-17 12:27:23,313][benchmark][INFO] - Configuring inference benchmark -[2023-08-17 12:27:23,314][benchmark][INFO] - + Setting seed(42) -[2023-08-17 12:27:23,748][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-17 12:27:23,748][backend][INFO] - Configuring pytorch backend -[2023-08-17 12:27:23,748][backend][INFO] - + Checking initial device isolation -[2023-08-17 12:27:23,748][backend][INFO] - + Checking contineous device isolation -[2023-08-17 12:27:23,748][pytorch][INFO] - + Disabling gradients -[2023-08-17 12:27:23,749][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-17 12:27:23,856][pytorch][INFO] - + Turning on eval mode -[2023-08-17 12:27:23,857][inference][INFO] - Running inference benchmark -[2023-08-17 12:27:23,977][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-17 12:27:23,979][inference][INFO] - + Tracking forward pass peak memory -[2023-08-17 12:27:24,017][inference][INFO] - + Forward pass peak memory: 467.73862399999996 (MB) -[2023-08-17 12:27:24,017][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-17 12:27:24,019][inference][INFO] - + Warming up the forward pass -[2023-08-17 12:27:24,054][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-17 12:27:29,102][inference][INFO] - + Forward pass latency: 3.43e-03 (s) -[2023-08-17 12:27:29,103][inference][INFO] - + Forward pass throughput: 1170.00 (samples/s) -[2023-08-17 12:27:29,103][inference][INFO] - Saving inference results -[2023-08-17 12:27:29,110][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-17_10:08:11_e7e9261a202dd5623f488f1cb05007e88629f275/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-17_10:08:11_e7e9261a202dd5623f488f1cb05007e88629f275/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 39258ef78f16622f3cbf970c22d0b92229e7cc16..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_10:08:11_e7e9261a202dd5623f488f1cb05007e88629f275/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference 
-model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_10:08:11_e7e9261a202dd5623f488f1cb05007e88629f275/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-17_10:08:11_e7e9261a202dd5623f488f1cb05007e88629f275/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 533f66ba7e065fad639dbe39769e170f8d1c688d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_10:08:11_e7e9261a202dd5623f488f1cb05007e88629f275/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
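Every path in this diff carries a <commit-date>_<commit-sha> prefix because the run and sweep directories in the Hydra config above interpolate two environment variables plus the experiment name. A sketch of that resolution, assuming the CI job exports both variables before Hydra composes the config (the values here are taken from the directory names in this diff):

```python
import os

from omegaconf import OmegaConf

# Sketch of how hydra.sweep.dir above resolves. oc.env is OmegaConf's
# built-in environment-variable resolver; the variables are assumed to be
# exported by the CI job before the run.
os.environ["COMMIT_DATE_GMT"] = "2023-08-17_10:08:11"
os.environ["COMMIT_SHA"] = "e7e9261a202dd5623f488f1cb05007e88629f275"
cfg = OmegaConf.create({
    "experiment_name": "pytorch_gpt2_inference",
    "sweep_dir": "sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}",
})
print(cfg.sweep_dir)
# sweeps/2023-08-17_10:08:11_e7e9261a202dd5623f488f1cb05007e88629f275/pytorch_gpt2_inference
```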
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-17_10:08:11_e7e9261a202dd5623f488f1cb05007e88629f275/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-17_10:08:11_e7e9261a202dd5623f488f1cb05007e88629f275/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-17_10:08:11_e7e9261a202dd5623f488f1cb05007e88629f275/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_10:08:11_e7e9261a202dd5623f488f1cb05007e88629f275/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-17_10:08:11_e7e9261a202dd5623f488f1cb05007e88629f275/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-17_10:08:11_e7e9261a202dd5623f488f1cb05007e88629f275/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index cb6bbe7e2787ba735853eaeafaa72ec86761d726..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_10:08:11_e7e9261a202dd5623f488f1cb05007e88629f275/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_10:08:11_e7e9261a202dd5623f488f1cb05007e88629f275/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-17_10:08:11_e7e9261a202dd5623f488f1cb05007e88629f275/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index c77dfd3da1f99b36943ae1318ebe7654f7dff9ba..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_10:08:11_e7e9261a202dd5623f488f1cb05007e88629f275/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.00019199999997,0.00338,296.0,0.481,208.0 diff --git a/raw_results/2023-08-17_10:08:11_e7e9261a202dd5623f488f1cb05007e88629f275/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-17_10:08:11_e7e9261a202dd5623f488f1cb05007e88629f275/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index c4a355fe0d126f11a53ab8804ebfdde19935350b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_10:08:11_e7e9261a202dd5623f488f1cb05007e88629f275/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-17 12:27:33,930][benchmark][INFO] - Configuring inference benchmark -[2023-08-17 12:27:33,931][benchmark][INFO] - + Setting seed(42) -[2023-08-17 12:27:35,337][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-17 12:27:35,338][backend][INFO] - Configuring pytorch backend -[2023-08-17 12:27:35,338][backend][INFO] - + Checking initial device isolation -[2023-08-17 12:27:35,338][backend][INFO] - + Checking contineous device isolation -[2023-08-17 12:27:35,338][pytorch][INFO] - + Disabling gradients -[2023-08-17 12:27:35,338][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-17 12:27:35,975][pytorch][INFO] - + Turning on eval mode -[2023-08-17 12:27:35,976][inference][INFO] - Running inference benchmark -[2023-08-17 12:27:36,181][inference][INFO] - + Tracking forward pass peak memory -[2023-08-17 12:27:36,227][inference][INFO] - + Forward pass peak memory: 469.00019199999997 (MB) -[2023-08-17 12:27:36,229][inference][INFO] - + Warming up the 
forward pass -[2023-08-17 12:27:36,265][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-17 12:27:41,323][inference][INFO] - + Forward pass latency: 3.38e-03 (s) -[2023-08-17 12:27:41,325][inference][INFO] - + Forward pass throughput: 296.00 (samples/s) -[2023-08-17 12:27:41,326][inference][INFO] - + Warming up the generation pass -[2023-08-17 12:27:41,816][inference][INFO] - + Tracking generation latency and throughput -[2023-08-17 12:27:47,114][inference][INFO] - + Generation pass latency: 4.81e-01 (s) -[2023-08-17 12:27:47,115][inference][INFO] - + Generation pass throughput: 208.00 (tokens/s) -[2023-08-17 12:27:47,115][inference][INFO] - Saving inference results -[2023-08-17 12:27:47,126][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-17_11:41:34_1791ef8df647a38b4fcb96c14ddd83a43861d713/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-17_11:41:34_1791ef8df647a38b4fcb96c14ddd83a43861d713/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index 8555d6744dd0e5881f14d8db6ba7f34fd0488b3b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_11:41:34_1791ef8df647a38b4fcb96c14ddd83a43861d713/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_11:41:34_1791ef8df647a38b4fcb96c14ddd83a43861d713/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-17_11:41:34_1791ef8df647a38b4fcb96c14ddd83a43861d713/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 66d8e37d8a625d7a15a5ab18700944cfdccd8fcf..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_11:41:34_1791ef8df647a38b4fcb96c14ddd83a43861d713/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
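The gpt2 generation figures above line up the same way the forward figures do: tokens per second appears to be batch_size * new_tokens divided by the generation latency. Copied values, arithmetic only:

```python
# Sketch: the gpt2 generate-pass numbers above are consistent with
# tokens/s ~ batch_size * new_tokens / generate.latency(s).
batch_size, new_tokens = 1, 100   # from the gpt2 benchmark config above
generate_latency_s = 0.481        # generate.latency(s) in the CSV
print(batch_size * new_tokens / generate_latency_s)  # ~207.9; reported: 208.0
```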
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-17_11:41:34_1791ef8df647a38b4fcb96c14ddd83a43861d713/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
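Each sweep job writes its own numbered subdirectory holding a hydra_config.yaml and an inference_results.csv, so the whole raw_results tree folds into a single comparison table. A sketch under the directory layout shown in this diff; PyYAML and pandas are assumed available:

```python
import glob
import os

import pandas as pd
import yaml

# Sketch: fold every run's inference_results.csv into one frame, keyed by
# the experiment name and batch size recorded next to it in
# hydra_config.yaml. Assumes the raw_results/<date>_<sha>/<experiment>/<job>/
# layout seen throughout this diff.
rows = []
for csv_path in glob.glob("raw_results/*/*/*/inference_results.csv"):
    run_dir = os.path.dirname(csv_path)
    with open(os.path.join(run_dir, "hydra_config.yaml")) as f:
        cfg = yaml.safe_load(f)
    row = pd.read_csv(csv_path, index_col=0).iloc[0].to_dict()
    row["experiment"] = cfg["experiment_name"]
    row["batch_size"] = cfg["benchmark"]["input_shapes"]["batch_size"]
    rows.append(row)

print(pd.DataFrame(rows))
```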
a/raw_results/2023-08-17_11:41:34_1791ef8df647a38b4fcb96c14ddd83a43861d713/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-17_11:41:34_1791ef8df647a38b4fcb96c14ddd83a43861d713/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_11:41:34_1791ef8df647a38b4fcb96c14ddd83a43861d713/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-17_11:41:34_1791ef8df647a38b4fcb96c14ddd83a43861d713/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-17_11:41:34_1791ef8df647a38b4fcb96c14ddd83a43861d713/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 8748d22fe3c2d5758d4d82aa74516bd5a24d74c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_11:41:34_1791ef8df647a38b4fcb96c14ddd83a43861d713/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_11:41:34_1791ef8df647a38b4fcb96c14ddd83a43861d713/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-17_11:41:34_1791ef8df647a38b4fcb96c14ddd83a43861d713/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 3a61921d52b23a0896a4e772fd0b85254d411335..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_11:41:34_1791ef8df647a38b4fcb96c14ddd83a43861d713/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.71871999999996,0.00324,309.0 diff --git a/raw_results/2023-08-17_11:41:34_1791ef8df647a38b4fcb96c14ddd83a43861d713/pytorch_bert_inference/0/main.log b/raw_results/2023-08-17_11:41:34_1791ef8df647a38b4fcb96c14ddd83a43861d713/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
e045ce5773d100d78ec0d93d9a7c1ae485864ddc..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_11:41:34_1791ef8df647a38b4fcb96c14ddd83a43861d713/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-17 12:28:48,909][benchmark][INFO] - Configuring inference benchmark -[2023-08-17 12:28:48,911][benchmark][INFO] - + Setting seed(42) -[2023-08-17 12:28:50,139][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-17 12:28:50,140][backend][INFO] - Configuring pytorch backend -[2023-08-17 12:28:50,140][backend][INFO] - + Checking initial device isolation -[2023-08-17 12:28:50,140][backend][INFO] - + Checking contineous device isolation -[2023-08-17 12:28:50,140][pytorch][INFO] - + Disabling gradients -[2023-08-17 12:28:50,141][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-17 12:28:50,748][pytorch][INFO] - + Turning on eval mode -[2023-08-17 12:28:50,749][inference][INFO] - Running inference benchmark -[2023-08-17 12:28:50,863][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-17 12:28:50,864][inference][INFO] - + Tracking forward pass peak memory -[2023-08-17 12:28:50,921][inference][INFO] - + Forward pass peak memory: 466.71871999999996 (MB) -[2023-08-17 12:28:50,922][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-17 12:28:50,924][inference][INFO] - + Warming up the forward pass -[2023-08-17 12:28:50,960][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-17 12:28:56,011][inference][INFO] - + Forward pass latency: 3.24e-03 (s) -[2023-08-17 12:28:56,012][inference][INFO] - + Forward pass throughput: 309.00 (samples/s) -[2023-08-17 12:28:56,013][inference][INFO] - Saving inference results -[2023-08-17 12:28:56,024][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-17_11:41:34_1791ef8df647a38b4fcb96c14ddd83a43861d713/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-17_11:41:34_1791ef8df647a38b4fcb96c14ddd83a43861d713/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index fc02352dc97d220d55fe04bc4edef4200110f22a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_11:41:34_1791ef8df647a38b4fcb96c14ddd83a43861d713/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
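The "forward pass peak memory" lines in these logs report a process-level figure in MB. One plausible way to obtain such a number is to poll the process RSS from a background thread while the pass runs; the sketch below assumes psutil is available and is illustrative only, not necessarily how the benchmark's tracker works.

```python
import os
import threading
import time

import psutil

# Rough sketch of a process-level peak-memory tracker in the spirit of the
# "forward pass peak memory" log lines: poll RSS in a background thread
# while fn runs and keep the maximum. Illustrative only.
def track_peak_memory_mb(fn, interval_s=0.001):
    proc = psutil.Process(os.getpid())
    peak = 0.0
    done = threading.Event()

    def poll():
        nonlocal peak
        while not done.is_set():
            peak = max(peak, proc.memory_info().rss / 1e6)
            time.sleep(interval_s)

    poller = threading.Thread(target=poll)
    poller.start()
    try:
        fn()
    finally:
        done.set()
        poller.join()
    return peak

# Toy usage: allocate a few buffers in place of a model forward pass.
print(track_peak_memory_mb(lambda: [bytearray(10_000_000) for _ in range(5)]))
```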
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_11:41:34_1791ef8df647a38b4fcb96c14ddd83a43861d713/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-17_11:41:34_1791ef8df647a38b4fcb96c14ddd83a43861d713/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index d88ac14569796df876b9be04fc0b3c149d421e9e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_11:41:34_1791ef8df647a38b4fcb96c14ddd83a43861d713/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-17_11:41:34_1791ef8df647a38b4fcb96c14ddd83a43861d713/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-17_11:41:34_1791ef8df647a38b4fcb96c14ddd83a43861d713/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-17_11:41:34_1791ef8df647a38b4fcb96c14ddd83a43861d713/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_11:41:34_1791ef8df647a38b4fcb96c14ddd83a43861d713/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-17_11:41:34_1791ef8df647a38b4fcb96c14ddd83a43861d713/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-17_11:41:34_1791ef8df647a38b4fcb96c14ddd83a43861d713/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 819b1876e26fdd758df23050ecd02bd8d430c90c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_11:41:34_1791ef8df647a38b4fcb96c14ddd83a43861d713/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_11:41:34_1791ef8df647a38b4fcb96c14ddd83a43861d713/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-17_11:41:34_1791ef8df647a38b4fcb96c14ddd83a43861d713/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 2742ac6b1b1436b80086df3e76c0dd16ea38f8dc..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_11:41:34_1791ef8df647a38b4fcb96c14ddd83a43861d713/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.726336,0.00353,1130.0 diff --git a/raw_results/2023-08-17_11:41:34_1791ef8df647a38b4fcb96c14ddd83a43861d713/pytorch_bert_inference/1/main.log b/raw_results/2023-08-17_11:41:34_1791ef8df647a38b4fcb96c14ddd83a43861d713/pytorch_bert_inference/1/main.log deleted file mode 100644 index de326cbe2b1d6a8126e869e015570a2cf3452e8a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_11:41:34_1791ef8df647a38b4fcb96c14ddd83a43861d713/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-17 12:28:56,405][benchmark][INFO] - Configuring inference benchmark -[2023-08-17 12:28:56,406][benchmark][INFO] - + Setting seed(42) -[2023-08-17 12:28:56,923][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-17 12:28:56,923][backend][INFO] - Configuring pytorch backend -[2023-08-17 12:28:56,924][backend][INFO] - + Checking initial device isolation -[2023-08-17 12:28:56,924][backend][INFO] - + Checking contineous device isolation -[2023-08-17 12:28:56,924][pytorch][INFO] - + Disabling gradients -[2023-08-17 12:28:56,924][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-17 12:28:57,032][pytorch][INFO] - + Turning on eval mode -[2023-08-17 12:28:57,033][inference][INFO] - Running inference benchmark -[2023-08-17 12:28:57,157][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-17 12:28:57,159][inference][INFO] - + Tracking forward pass 
peak memory -[2023-08-17 12:28:57,200][inference][INFO] - + Forward pass peak memory: 467.726336 (MB) -[2023-08-17 12:28:57,201][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-17 12:28:57,202][inference][INFO] - + Warming up the forward pass -[2023-08-17 12:28:57,238][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-17 12:29:02,284][inference][INFO] - + Forward pass latency: 3.53e-03 (s) -[2023-08-17 12:29:02,286][inference][INFO] - + Forward pass throughput: 1130.00 (samples/s) -[2023-08-17 12:29:02,286][inference][INFO] - Saving inference results -[2023-08-17 12:29:02,296][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-17_11:41:34_1791ef8df647a38b4fcb96c14ddd83a43861d713/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-17_11:41:34_1791ef8df647a38b4fcb96c14ddd83a43861d713/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 39258ef78f16622f3cbf970c22d0b92229e7cc16..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_11:41:34_1791ef8df647a38b4fcb96c14ddd83a43861d713/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_11:41:34_1791ef8df647a38b4fcb96c14ddd83a43861d713/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-17_11:41:34_1791ef8df647a38b4fcb96c14ddd83a43861d713/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index a986732abe95ad026e0bd622c11cae603a564498..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_11:41:34_1791ef8df647a38b4fcb96c14ddd83a43861d713/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
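Comparing the two bert sweep jobs at this commit (batch sizes 1 and 4) shows why the throughput columns differ so sharply: per-step latency barely moves when the batch grows fourfold on this tiny model, so throughput scales almost linearly with batch size.

```python
# Sketch: batch scaling across the two bert sweep jobs at commit 1791ef8...
# Latencies are the forward.latency(s) values recorded above.
latencies = {1: 0.00324, 4: 0.00353}   # batch_size -> forward.latency(s)
for bs, lat in latencies.items():
    print(bs, round(bs / lat))         # 1 -> 309, 4 -> 1133 (CSV: 309.0, 1130.0)
```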
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-17_11:41:34_1791ef8df647a38b4fcb96c14ddd83a43861d713/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-17_11:41:34_1791ef8df647a38b4fcb96c14ddd83a43861d713/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-17_11:41:34_1791ef8df647a38b4fcb96c14ddd83a43861d713/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_11:41:34_1791ef8df647a38b4fcb96c14ddd83a43861d713/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-17_11:41:34_1791ef8df647a38b4fcb96c14ddd83a43861d713/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-17_11:41:34_1791ef8df647a38b4fcb96c14ddd83a43861d713/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index cb6bbe7e2787ba735853eaeafaa72ec86761d726..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_11:41:34_1791ef8df647a38b4fcb96c14ddd83a43861d713/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_11:41:34_1791ef8df647a38b4fcb96c14ddd83a43861d713/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-17_11:41:34_1791ef8df647a38b4fcb96c14ddd83a43861d713/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index dff3e616083d538befebb1810c106ff23e1dde61..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_11:41:34_1791ef8df647a38b4fcb96c14ddd83a43861d713/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.15993599999996,0.00361,277.0,0.579,173.0 diff --git a/raw_results/2023-08-17_11:41:34_1791ef8df647a38b4fcb96c14ddd83a43861d713/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-17_11:41:34_1791ef8df647a38b4fcb96c14ddd83a43861d713/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 191f2b471193c4cae7956524061ddf91c3efaf37..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-17_11:41:34_1791ef8df647a38b4fcb96c14ddd83a43861d713/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-17 12:29:07,336][benchmark][INFO] - Configuring inference benchmark -[2023-08-17 12:29:07,337][benchmark][INFO] - + Setting seed(42) -[2023-08-17 12:29:08,806][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-17 12:29:08,807][backend][INFO] - Configuring pytorch backend -[2023-08-17 12:29:08,807][backend][INFO] - + Checking initial device isolation -[2023-08-17 12:29:08,807][backend][INFO] - + Checking contineous device isolation -[2023-08-17 12:29:08,807][pytorch][INFO] - + Disabling gradients -[2023-08-17 12:29:08,807][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-17 12:29:09,465][pytorch][INFO] - + Turning on eval mode -[2023-08-17 12:29:09,465][inference][INFO] - Running inference benchmark -[2023-08-17 12:29:09,661][inference][INFO] - + Tracking forward pass peak memory -[2023-08-17 12:29:09,705][inference][INFO] - + Forward pass peak memory: 469.15993599999996 (MB) -[2023-08-17 12:29:09,706][inference][INFO] - + Warming up the forward pass -[2023-08-17 12:29:09,742][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-17 12:29:14,790][inference][INFO] - + Forward pass latency: 3.61e-03 (s) -[2023-08-17 12:29:14,792][inference][INFO] - + Forward pass throughput: 277.00 (samples/s) -[2023-08-17 12:29:14,793][inference][INFO] - + Warming up the generation pass -[2023-08-17 12:29:15,283][inference][INFO] - + Tracking generation latency and throughput -[2023-08-17 12:29:20,496][inference][INFO] - + Generation pass latency: 5.79e-01 (s) -[2023-08-17 12:29:20,497][inference][INFO] - + Generation pass throughput: 173.00 (tokens/s) -[2023-08-17 12:29:20,497][inference][INFO] - Saving inference results -[2023-08-17 12:29:20,508][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-17_13:30:46_d2871b29754abd0f72cf42c299bb1c041519f7bc/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-17_13:30:46_d2871b29754abd0f72cf42c299bb1c041519f7bc/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index 8555d6744dd0e5881f14d8db6ba7f34fd0488b3b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_13:30:46_d2871b29754abd0f72cf42c299bb1c041519f7bc/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
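Because the same gpt2 config was run at both commits, the two result rows above give a direct commit-over-commit signal; the usual ratio is the percent change of the new average over the old. Copied values, arithmetic only:

```python
# Sketch: commit-over-commit percent change for the gpt2 forward latency,
# using the two runs recorded above (e7e9261... then 1791ef8...).
old, new = 0.00338, 0.00361     # forward.latency(s) at each commit
print((new - old) / old * 100)  # ~ +6.8 %, i.e. slower at the newer commit
```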
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_13:30:46_d2871b29754abd0f72cf42c299bb1c041519f7bc/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-17_13:30:46_d2871b29754abd0f72cf42c299bb1c041519f7bc/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index a3a2c0a783217f761e42655e74b4d448ef26fafc..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_13:30:46_d2871b29754abd0f72cf42c299bb1c041519f7bc/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
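The raw .config/config.yaml files keep disable_grad and eval_mode as the custom interpolation ${is_inference:${benchmark.name}}, while the rendered hydra_config.yaml files show both already resolved to true. Below is a sketch of a resolver with that behavior; the actual registration inside optimum-benchmark may differ.

```python
from omegaconf import OmegaConf

# Sketch of a custom resolver matching the ${is_inference:${benchmark.name}}
# interpolation seen in the raw config files. Hypothetical registration;
# the rendered hydra_config.yaml confirms only the resolved value (true).
OmegaConf.register_new_resolver("is_inference", lambda name: name == "inference")
cfg = OmegaConf.create({
    "benchmark": {"name": "inference"},
    "backend": {"disable_grad": "${is_inference:${benchmark.name}}"},
})
print(cfg.backend.disable_grad)  # True
```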
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-17_13:30:46_d2871b29754abd0f72cf42c299bb1c041519f7bc/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-17_13:30:46_d2871b29754abd0f72cf42c299bb1c041519f7bc/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-17_13:30:46_d2871b29754abd0f72cf42c299bb1c041519f7bc/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_13:30:46_d2871b29754abd0f72cf42c299bb1c041519f7bc/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-17_13:30:46_d2871b29754abd0f72cf42c299bb1c041519f7bc/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-17_13:30:46_d2871b29754abd0f72cf42c299bb1c041519f7bc/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 8748d22fe3c2d5758d4d82aa74516bd5a24d74c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_13:30:46_d2871b29754abd0f72cf42c299bb1c041519f7bc/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_13:30:46_d2871b29754abd0f72cf42c299bb1c041519f7bc/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-17_13:30:46_d2871b29754abd0f72cf42c299bb1c041519f7bc/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 90a9ef59b63ae419842c49df91dc91d2c135628b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_13:30:46_d2871b29754abd0f72cf42c299bb1c041519f7bc/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.05868799999996,0.00374,267.0 diff --git a/raw_results/2023-08-17_13:30:46_d2871b29754abd0f72cf42c299bb1c041519f7bc/pytorch_bert_inference/0/main.log b/raw_results/2023-08-17_13:30:46_d2871b29754abd0f72cf42c299bb1c041519f7bc/pytorch_bert_inference/0/main.log deleted file mode 100644 index ee14c17a3b7329571a46b55d273062a2ef2aae7a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_13:30:46_d2871b29754abd0f72cf42c299bb1c041519f7bc/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-17 14:49:42,609][benchmark][INFO] - Configuring inference benchmark -[2023-08-17 14:49:42,611][benchmark][INFO] - + Setting seed(42) -[2023-08-17 14:49:43,870][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-17 14:49:43,870][backend][INFO] - Configuring pytorch backend -[2023-08-17 14:49:43,870][backend][INFO] - + Checking initial device isolation -[2023-08-17 14:49:43,870][backend][INFO] - + Checking contineous device isolation -[2023-08-17 14:49:43,870][pytorch][INFO] - + Disabling gradients -[2023-08-17 14:49:43,871][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-17 14:49:44,472][pytorch][INFO] - + Turning on eval mode -[2023-08-17 14:49:44,472][inference][INFO] - Running inference benchmark -[2023-08-17 14:49:44,590][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-17 14:49:44,591][inference][INFO] - + Tracking forward 
pass peak memory -[2023-08-17 14:49:44,646][inference][INFO] - + Forward pass peak memory: 467.05868799999996 (MB) -[2023-08-17 14:49:44,647][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-17 14:49:44,649][inference][INFO] - + Warming up the forward pass -[2023-08-17 14:49:44,691][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-17 14:49:49,738][inference][INFO] - + Forward pass latency: 3.74e-03 (s) -[2023-08-17 14:49:49,740][inference][INFO] - + Forward pass throughput: 267.00 (samples/s) -[2023-08-17 14:49:49,740][inference][INFO] - Saving inference results -[2023-08-17 14:49:49,753][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-17_13:30:46_d2871b29754abd0f72cf42c299bb1c041519f7bc/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-17_13:30:46_d2871b29754abd0f72cf42c299bb1c041519f7bc/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index fc02352dc97d220d55fe04bc4edef4200110f22a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_13:30:46_d2871b29754abd0f72cf42c299bb1c041519f7bc/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_13:30:46_d2871b29754abd0f72cf42c299bb1c041519f7bc/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-17_13:30:46_d2871b29754abd0f72cf42c299bb1c041519f7bc/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 6c27a65ef8d27bd5a1aae063b9446268fc68619d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_13:30:46_d2871b29754abd0f72cf42c299bb1c041519f7bc/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} 
- launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-17_13:30:46_d2871b29754abd0f72cf42c299bb1c041519f7bc/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-17_13:30:46_d2871b29754abd0f72cf42c299bb1c041519f7bc/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-17_13:30:46_d2871b29754abd0f72cf42c299bb1c041519f7bc/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_13:30:46_d2871b29754abd0f72cf42c299bb1c041519f7bc/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-17_13:30:46_d2871b29754abd0f72cf42c299bb1c041519f7bc/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-17_13:30:46_d2871b29754abd0f72cf42c299bb1c041519f7bc/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 819b1876e26fdd758df23050ecd02bd8d430c90c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_13:30:46_d2871b29754abd0f72cf42c299bb1c041519f7bc/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_13:30:46_d2871b29754abd0f72cf42c299bb1c041519f7bc/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-17_13:30:46_d2871b29754abd0f72cf42c299bb1c041519f7bc/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 9540e1551fe5dd7d7cd835807dd814ddaa79cf1c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_13:30:46_d2871b29754abd0f72cf42c299bb1c041519f7bc/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.103168,0.00378,1060.0 diff --git a/raw_results/2023-08-17_13:30:46_d2871b29754abd0f72cf42c299bb1c041519f7bc/pytorch_bert_inference/1/main.log b/raw_results/2023-08-17_13:30:46_d2871b29754abd0f72cf42c299bb1c041519f7bc/pytorch_bert_inference/1/main.log deleted file mode 100644 index 
875826ed92505ebcbffd6e3eea910610e6ea77a0..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_13:30:46_d2871b29754abd0f72cf42c299bb1c041519f7bc/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-17 14:49:50,147][benchmark][INFO] - Configuring inference benchmark -[2023-08-17 14:49:50,148][benchmark][INFO] - + Setting seed(42) -[2023-08-17 14:49:50,597][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-17 14:49:50,597][backend][INFO] - Configuring pytorch backend -[2023-08-17 14:49:50,598][backend][INFO] - + Checking initial device isolation -[2023-08-17 14:49:50,598][backend][INFO] - + Checking contineous device isolation -[2023-08-17 14:49:50,598][pytorch][INFO] - + Disabling gradients -[2023-08-17 14:49:50,598][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-17 14:49:50,712][pytorch][INFO] - + Turning on eval mode -[2023-08-17 14:49:50,713][inference][INFO] - Running inference benchmark -[2023-08-17 14:49:50,913][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-17 14:49:50,914][inference][INFO] - + Tracking forward pass peak memory -[2023-08-17 14:49:50,959][inference][INFO] - + Forward pass peak memory: 468.103168 (MB) -[2023-08-17 14:49:50,960][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-17 14:49:50,962][inference][INFO] - + Warming up the forward pass -[2023-08-17 14:49:51,005][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-17 14:49:56,049][inference][INFO] - + Forward pass latency: 3.78e-03 (s) -[2023-08-17 14:49:56,050][inference][INFO] - + Forward pass throughput: 1060.00 (samples/s) -[2023-08-17 14:49:56,050][inference][INFO] - Saving inference results -[2023-08-17 14:49:56,057][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-17_13:30:46_d2871b29754abd0f72cf42c299bb1c041519f7bc/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-17_13:30:46_d2871b29754abd0f72cf42c299bb1c041519f7bc/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 39258ef78f16622f3cbf970c22d0b92229e7cc16..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_13:30:46_d2871b29754abd0f72cf42c299bb1c041519f7bc/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_13:30:46_d2871b29754abd0f72cf42c299bb1c041519f7bc/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-17_13:30:46_d2871b29754abd0f72cf42c299bb1c041519f7bc/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 207c1d7aa3614a27cc230da1bbbea9e40f2e44c0..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_13:30:46_d2871b29754abd0f72cf42c299bb1c041519f7bc/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
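The config.yaml files in this diff record disable_grad and eval_mode as the unresolved interpolation ${is_inference:${benchmark.name}}, while the corresponding hydra_config.yaml files show the resolved value true. A minimal sketch of how such a custom resolver behaves; the registration below is an illustrative stand-in, not optimum_benchmark's actual implementation:

    from omegaconf import OmegaConf

    # Hypothetical resolver: true exactly when the benchmark name is 'inference'.
    OmegaConf.register_new_resolver("is_inference", lambda name: name == "inference")

    cfg = OmegaConf.create({
        "benchmark": {"name": "inference"},
        "backend": {"disable_grad": "${is_inference:${benchmark.name}}"},
    })
    print(cfg.backend.disable_grad)  # True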
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-17_13:30:46_d2871b29754abd0f72cf42c299bb1c041519f7bc/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-17_13:30:46_d2871b29754abd0f72cf42c299bb1c041519f7bc/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-17_13:30:46_d2871b29754abd0f72cf42c299bb1c041519f7bc/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_13:30:46_d2871b29754abd0f72cf42c299bb1c041519f7bc/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-17_13:30:46_d2871b29754abd0f72cf42c299bb1c041519f7bc/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-17_13:30:46_d2871b29754abd0f72cf42c299bb1c041519f7bc/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index cb6bbe7e2787ba735853eaeafaa72ec86761d726..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_13:30:46_d2871b29754abd0f72cf42c299bb1c041519f7bc/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_13:30:46_d2871b29754abd0f72cf42c299bb1c041519f7bc/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-17_13:30:46_d2871b29754abd0f72cf42c299bb1c041519f7bc/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 6be2b87b37118488720535e3dc8bd2774d888595..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_13:30:46_d2871b29754abd0f72cf42c299bb1c041519f7bc/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,468.82815999999997,0.0031,323.0,0.487,205.0 diff --git a/raw_results/2023-08-17_13:30:46_d2871b29754abd0f72cf42c299bb1c041519f7bc/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-17_13:30:46_d2871b29754abd0f72cf42c299bb1c041519f7bc/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 1adfa3eb9ac666acc985a42d7928422a2d41a1ea..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_13:30:46_d2871b29754abd0f72cf42c299bb1c041519f7bc/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-17 14:50:00,862][benchmark][INFO] - Configuring inference benchmark -[2023-08-17 14:50:00,864][benchmark][INFO] - + Setting seed(42) -[2023-08-17 14:50:02,320][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-17 14:50:02,321][backend][INFO] - Configuring pytorch backend -[2023-08-17 14:50:02,321][backend][INFO] - + Checking initial device isolation -[2023-08-17 14:50:02,321][backend][INFO] - + Checking contineous device isolation -[2023-08-17 14:50:02,322][pytorch][INFO] - + Disabling gradients -[2023-08-17 14:50:02,322][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-17 14:50:03,009][pytorch][INFO] - + Turning on eval mode -[2023-08-17 14:50:03,010][inference][INFO] - Running inference benchmark -[2023-08-17 14:50:03,207][inference][INFO] - + Tracking forward pass peak memory -[2023-08-17 14:50:03,254][inference][INFO] - + Forward pass peak memory: 468.82815999999997 (MB) -[2023-08-17 14:50:03,256][inference][INFO] - + Warming up the 
forward pass -[2023-08-17 14:50:03,288][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-17 14:50:08,341][inference][INFO] - + Forward pass latency: 3.10e-03 (s) -[2023-08-17 14:50:08,343][inference][INFO] - + Forward pass throughput: 323.00 (samples/s) -[2023-08-17 14:50:08,344][inference][INFO] - + Warming up the generation pass -[2023-08-17 14:50:08,840][inference][INFO] - + Tracking generation latency and throughput -[2023-08-17 14:50:14,204][inference][INFO] - + Generation pass latency: 4.87e-01 (s) -[2023-08-17 14:50:14,205][inference][INFO] - + Generation pass throughput: 205.00 (tokens/s) -[2023-08-17 14:50:14,205][inference][INFO] - Saving inference results -[2023-08-17 14:50:14,218][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-17_15:00:32_d6bf08f7f6f8bf5d6d94e9b10b7d8203906353ad/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-17_15:00:32_d6bf08f7f6f8bf5d6d94e9b10b7d8203906353ad/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index 8555d6744dd0e5881f14d8db6ba7f34fd0488b3b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:00:32_d6bf08f7f6f8bf5d6d94e9b10b7d8203906353ad/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_15:00:32_d6bf08f7f6f8bf5d6d94e9b10b7d8203906353ad/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-17_15:00:32_d6bf08f7f6f8bf5d6d94e9b10b7d8203906353ad/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 4a21d23244da39be51c6e6d9d25f2062bf8333d3..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:00:32_d6bf08f7f6f8bf5d6d94e9b10b7d8203906353ad/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-17_15:00:32_d6bf08f7f6f8bf5d6d94e9b10b7d8203906353ad/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-17_15:00:32_d6bf08f7f6f8bf5d6d94e9b10b7d8203906353ad/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-17_15:00:32_d6bf08f7f6f8bf5d6d94e9b10b7d8203906353ad/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:00:32_d6bf08f7f6f8bf5d6d94e9b10b7d8203906353ad/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-17_15:00:32_d6bf08f7f6f8bf5d6d94e9b10b7d8203906353ad/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-17_15:00:32_d6bf08f7f6f8bf5d6d94e9b10b7d8203906353ad/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 8748d22fe3c2d5758d4d82aa74516bd5a24d74c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:00:32_d6bf08f7f6f8bf5d6d94e9b10b7d8203906353ad/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_15:00:32_d6bf08f7f6f8bf5d6d94e9b10b7d8203906353ad/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-17_15:00:32_d6bf08f7f6f8bf5d6d94e9b10b7d8203906353ad/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index cb302215f945c77e4d95cdf50ace4edf151a365d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:00:32_d6bf08f7f6f8bf5d6d94e9b10b7d8203906353ad/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,465.825792,0.00321,312.0 diff --git a/raw_results/2023-08-17_15:00:32_d6bf08f7f6f8bf5d6d94e9b10b7d8203906353ad/pytorch_bert_inference/0/main.log b/raw_results/2023-08-17_15:00:32_d6bf08f7f6f8bf5d6d94e9b10b7d8203906353ad/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
26bf354168070fb179e94cde9ba24e0dfb576bd9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:00:32_d6bf08f7f6f8bf5d6d94e9b10b7d8203906353ad/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-17 16:49:54,283][benchmark][INFO] - Configuring inference benchmark -[2023-08-17 16:49:54,284][benchmark][INFO] - + Setting seed(42) -[2023-08-17 16:49:55,500][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-17 16:49:55,500][backend][INFO] - Configuring pytorch backend -[2023-08-17 16:49:55,500][backend][INFO] - + Checking initial device isolation -[2023-08-17 16:49:55,500][backend][INFO] - + Checking contineous device isolation -[2023-08-17 16:49:55,500][pytorch][INFO] - + Disabling gradients -[2023-08-17 16:49:55,500][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-17 16:49:56,111][pytorch][INFO] - + Turning on eval mode -[2023-08-17 16:49:56,111][inference][INFO] - Running inference benchmark -[2023-08-17 16:49:56,236][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-17 16:49:56,238][inference][INFO] - + Tracking forward pass peak memory -[2023-08-17 16:49:56,290][inference][INFO] - + Forward pass peak memory: 465.825792 (MB) -[2023-08-17 16:49:56,291][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-17 16:49:56,293][inference][INFO] - + Warming up the forward pass -[2023-08-17 16:49:56,330][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-17 16:50:01,381][inference][INFO] - + Forward pass latency: 3.21e-03 (s) -[2023-08-17 16:50:01,382][inference][INFO] - + Forward pass throughput: 312.00 (samples/s) -[2023-08-17 16:50:01,382][inference][INFO] - Saving inference results -[2023-08-17 16:50:01,392][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-17_15:00:32_d6bf08f7f6f8bf5d6d94e9b10b7d8203906353ad/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-17_15:00:32_d6bf08f7f6f8bf5d6d94e9b10b7d8203906353ad/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index fc02352dc97d220d55fe04bc4edef4200110f22a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:00:32_d6bf08f7f6f8bf5d6d94e9b10b7d8203906353ad/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_15:00:32_d6bf08f7f6f8bf5d6d94e9b10b7d8203906353ad/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-17_15:00:32_d6bf08f7f6f8bf5d6d94e9b10b7d8203906353ad/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 450714d061c84a0b42efaf8ea426896c26e51f42..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:00:32_d6bf08f7f6f8bf5d6d94e9b10b7d8203906353ad/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
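A sanity check on the deleted inference_results.csv rows: forward.throughput(samples/s) is batch_size / forward.latency(s), and, for the batch_size=1 runs here, generate.throughput(tokens/s) is new_tokens / generate.latency(s); the reported values appear rounded to three significant figures. A small recomputation, where the helper functions are illustrative rather than part of optimum_benchmark:

    # Values taken from the CSV rows deleted above.
    def forward_throughput(batch_size, latency_s):
        return batch_size / latency_s

    def generate_throughput(new_tokens, latency_s):
        return new_tokens / latency_s

    print(round(forward_throughput(1, 0.0031)))    # 323, matches pytorch_gpt2_inference/0
    print(round(generate_throughput(100, 0.487)))  # 205, matches pytorch_gpt2_inference/0
    print(round(forward_throughput(4, 0.00378)))   # 1058 vs 1060 reported (latency shown rounded)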
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-17_15:00:32_d6bf08f7f6f8bf5d6d94e9b10b7d8203906353ad/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-17_15:00:32_d6bf08f7f6f8bf5d6d94e9b10b7d8203906353ad/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-17_15:00:32_d6bf08f7f6f8bf5d6d94e9b10b7d8203906353ad/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:00:32_d6bf08f7f6f8bf5d6d94e9b10b7d8203906353ad/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-17_15:00:32_d6bf08f7f6f8bf5d6d94e9b10b7d8203906353ad/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-17_15:00:32_d6bf08f7f6f8bf5d6d94e9b10b7d8203906353ad/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 819b1876e26fdd758df23050ecd02bd8d430c90c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:00:32_d6bf08f7f6f8bf5d6d94e9b10b7d8203906353ad/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
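The job_logging block recorded above is a standard logging dictConfig that Hydra applies per job; outside Hydra it can be fed to logging.config.dictConfig directly. A minimal stand-alone sketch of the console/colorlog part, assuming the colorlog package is installed; the file handler and log_colors mapping are omitted for brevity:

    import logging.config

    logging.config.dictConfig({
        "version": 1,
        "disable_existing_loggers": False,
        "formatters": {
            "colorlog": {
                "()": "colorlog.ColoredFormatter",
                "format": "[%(cyan)s%(asctime)s%(reset)s]"
                          "[%(blue)s%(name)s%(reset)s]"
                          "[%(log_color)s%(levelname)s%(reset)s] - %(message)s",
            },
        },
        "handlers": {
            "console": {
                "class": "logging.StreamHandler",
                "formatter": "colorlog",
                "stream": "ext://sys.stdout",
            },
        },
        "root": {"level": "INFO", "handlers": ["console"]},
    })
    logging.getLogger("inference").info("Running inference benchmark")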
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_15:00:32_d6bf08f7f6f8bf5d6d94e9b10b7d8203906353ad/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-17_15:00:32_d6bf08f7f6f8bf5d6d94e9b10b7d8203906353ad/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 0fc880c42891addb066fe983ad9d7cbd9166ccad..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:00:32_d6bf08f7f6f8bf5d6d94e9b10b7d8203906353ad/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.84159999999997,0.00354,1130.0 diff --git a/raw_results/2023-08-17_15:00:32_d6bf08f7f6f8bf5d6d94e9b10b7d8203906353ad/pytorch_bert_inference/1/main.log b/raw_results/2023-08-17_15:00:32_d6bf08f7f6f8bf5d6d94e9b10b7d8203906353ad/pytorch_bert_inference/1/main.log deleted file mode 100644 index 1e9c20a61b994337706382ccee91602e4c51cce1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:00:32_d6bf08f7f6f8bf5d6d94e9b10b7d8203906353ad/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-17 16:50:01,751][benchmark][INFO] - Configuring inference benchmark -[2023-08-17 16:50:01,752][benchmark][INFO] - + Setting seed(42) -[2023-08-17 16:50:02,192][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-17 16:50:02,193][backend][INFO] - Configuring pytorch backend -[2023-08-17 16:50:02,193][backend][INFO] - + Checking initial device isolation -[2023-08-17 16:50:02,193][backend][INFO] - + Checking contineous device isolation -[2023-08-17 16:50:02,193][pytorch][INFO] - + Disabling gradients -[2023-08-17 16:50:02,193][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-17 16:50:02,306][pytorch][INFO] - + Turning on eval mode -[2023-08-17 16:50:02,307][inference][INFO] - Running inference benchmark -[2023-08-17 16:50:02,428][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-17 16:50:02,429][inference][INFO] - + Tracking forward 
pass peak memory -[2023-08-17 16:50:02,468][inference][INFO] - + Forward pass peak memory: 466.84159999999997 (MB) -[2023-08-17 16:50:02,468][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-17 16:50:02,470][inference][INFO] - + Warming up the forward pass -[2023-08-17 16:50:02,505][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-17 16:50:07,553][inference][INFO] - + Forward pass latency: 3.54e-03 (s) -[2023-08-17 16:50:07,554][inference][INFO] - + Forward pass throughput: 1130.00 (samples/s) -[2023-08-17 16:50:07,554][inference][INFO] - Saving inference results -[2023-08-17 16:50:07,561][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-17_15:00:32_d6bf08f7f6f8bf5d6d94e9b10b7d8203906353ad/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-17_15:00:32_d6bf08f7f6f8bf5d6d94e9b10b7d8203906353ad/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 39258ef78f16622f3cbf970c22d0b92229e7cc16..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:00:32_d6bf08f7f6f8bf5d6d94e9b10b7d8203906353ad/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_15:00:32_d6bf08f7f6f8bf5d6d94e9b10b7d8203906353ad/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-17_15:00:32_d6bf08f7f6f8bf5d6d94e9b10b7d8203906353ad/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 26a5deca8049fe10b84e60df0b4ec6136b4446db..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:00:32_d6bf08f7f6f8bf5d6d94e9b10b7d8203906353ad/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - 
launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-17_15:00:32_d6bf08f7f6f8bf5d6d94e9b10b7d8203906353ad/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-17_15:00:32_d6bf08f7f6f8bf5d6d94e9b10b7d8203906353ad/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-17_15:00:32_d6bf08f7f6f8bf5d6d94e9b10b7d8203906353ad/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:00:32_d6bf08f7f6f8bf5d6d94e9b10b7d8203906353ad/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-17_15:00:32_d6bf08f7f6f8bf5d6d94e9b10b7d8203906353ad/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-17_15:00:32_d6bf08f7f6f8bf5d6d94e9b10b7d8203906353ad/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index cb6bbe7e2787ba735853eaeafaa72ec86761d726..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:00:32_d6bf08f7f6f8bf5d6d94e9b10b7d8203906353ad/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_15:00:32_d6bf08f7f6f8bf5d6d94e9b10b7d8203906353ad/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-17_15:00:32_d6bf08f7f6f8bf5d6d94e9b10b7d8203906353ad/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 0a1fea7f09f7d4887f66d4692e332d56b30a0e2d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:00:32_d6bf08f7f6f8bf5d6d94e9b10b7d8203906353ad/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,468.893696,0.00394,254.0,0.491,204.0 diff --git a/raw_results/2023-08-17_15:00:32_d6bf08f7f6f8bf5d6d94e9b10b7d8203906353ad/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-17_15:00:32_d6bf08f7f6f8bf5d6d94e9b10b7d8203906353ad/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index c4dcbdd0c5932aeffcdcfb6c948896ebab839dc9..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-17_15:00:32_d6bf08f7f6f8bf5d6d94e9b10b7d8203906353ad/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-17 16:50:12,313][benchmark][INFO] - Configuring inference benchmark -[2023-08-17 16:50:12,314][benchmark][INFO] - + Setting seed(42) -[2023-08-17 16:50:13,693][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-17 16:50:13,693][backend][INFO] - Configuring pytorch backend -[2023-08-17 16:50:13,693][backend][INFO] - + Checking initial device isolation -[2023-08-17 16:50:13,694][backend][INFO] - + Checking contineous device isolation -[2023-08-17 16:50:13,694][pytorch][INFO] - + Disabling gradients -[2023-08-17 16:50:13,694][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-17 16:50:14,458][pytorch][INFO] - + Turning on eval mode -[2023-08-17 16:50:14,458][inference][INFO] - Running inference benchmark -[2023-08-17 16:50:14,652][inference][INFO] - + Tracking forward pass peak memory -[2023-08-17 16:50:14,699][inference][INFO] - + Forward pass peak memory: 468.893696 (MB) -[2023-08-17 16:50:14,701][inference][INFO] - + Warming up the forward pass -[2023-08-17 16:50:14,737][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-17 16:50:19,780][inference][INFO] - + Forward pass latency: 3.94e-03 (s) -[2023-08-17 16:50:19,781][inference][INFO] - + Forward pass throughput: 254.00 (samples/s) -[2023-08-17 16:50:19,782][inference][INFO] - + Warming up the generation pass -[2023-08-17 16:50:20,373][inference][INFO] - + Tracking generation latency and throughput -[2023-08-17 16:50:25,777][inference][INFO] - + Generation pass latency: 4.91e-01 (s) -[2023-08-17 16:50:25,778][inference][INFO] - + Generation pass throughput: 204.00 (tokens/s) -[2023-08-17 16:50:25,778][inference][INFO] - Saving inference results -[2023-08-17 16:50:25,790][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-17_15:03:41_5347d00092c4f2429389269dd912417e8daff848/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-17_15:03:41_5347d00092c4f2429389269dd912417e8daff848/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index 8555d6744dd0e5881f14d8db6ba7f34fd0488b3b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:03:41_5347d00092c4f2429389269dd912417e8daff848/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_15:03:41_5347d00092c4f2429389269dd912417e8daff848/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-17_15:03:41_5347d00092c4f2429389269dd912417e8daff848/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 12040e2de475897e66966f6f025928730b33675c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:03:41_5347d00092c4f2429389269dd912417e8daff848/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-17_15:03:41_5347d00092c4f2429389269dd912417e8daff848/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-17_15:03:41_5347d00092c4f2429389269dd912417e8daff848/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-17_15:03:41_5347d00092c4f2429389269dd912417e8daff848/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:03:41_5347d00092c4f2429389269dd912417e8daff848/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-17_15:03:41_5347d00092c4f2429389269dd912417e8daff848/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-17_15:03:41_5347d00092c4f2429389269dd912417e8daff848/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 8748d22fe3c2d5758d4d82aa74516bd5a24d74c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:03:41_5347d00092c4f2429389269dd912417e8daff848/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_15:03:41_5347d00092c4f2429389269dd912417e8daff848/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-17_15:03:41_5347d00092c4f2429389269dd912417e8daff848/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 60157baae80798fd025d0ef94d2cf2f0fdc3e1c0..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:03:41_5347d00092c4f2429389269dd912417e8daff848/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.059264,0.00375,267.0 diff --git a/raw_results/2023-08-17_15:03:41_5347d00092c4f2429389269dd912417e8daff848/pytorch_bert_inference/0/main.log b/raw_results/2023-08-17_15:03:41_5347d00092c4f2429389269dd912417e8daff848/pytorch_bert_inference/0/main.log deleted file mode 100644 index 2e49b65dff077e57d2e64e375dca31946f45b309..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:03:41_5347d00092c4f2429389269dd912417e8daff848/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-17 16:51:28,514][benchmark][INFO] - Configuring inference benchmark -[2023-08-17 16:51:28,515][benchmark][INFO] - + Setting seed(42) -[2023-08-17 16:51:29,798][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-17 16:51:29,799][backend][INFO] - Configuring pytorch backend -[2023-08-17 16:51:29,799][backend][INFO] - + Checking initial device isolation -[2023-08-17 16:51:29,799][backend][INFO] - + Checking contineous device isolation -[2023-08-17 16:51:29,799][pytorch][INFO] - + Disabling gradients -[2023-08-17 16:51:29,799][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-17 16:51:30,421][pytorch][INFO] - + Turning on eval mode -[2023-08-17 16:51:30,421][inference][INFO] - Running inference benchmark -[2023-08-17 16:51:30,542][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-17 16:51:30,543][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-17 16:51:30,601][inference][INFO] - + Forward pass peak memory: 466.059264 (MB) -[2023-08-17 16:51:30,602][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-17 16:51:30,603][inference][INFO] - + Warming up the forward pass -[2023-08-17 16:51:30,641][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-17 16:51:35,687][inference][INFO] - + Forward pass latency: 3.75e-03 (s) -[2023-08-17 16:51:35,689][inference][INFO] - + Forward pass throughput: 267.00 (samples/s) -[2023-08-17 16:51:35,689][inference][INFO] - Saving inference results -[2023-08-17 16:51:35,700][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-17_15:03:41_5347d00092c4f2429389269dd912417e8daff848/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-17_15:03:41_5347d00092c4f2429389269dd912417e8daff848/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index fc02352dc97d220d55fe04bc4edef4200110f22a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:03:41_5347d00092c4f2429389269dd912417e8daff848/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_15:03:41_5347d00092c4f2429389269dd912417e8daff848/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-17_15:03:41_5347d00092c4f2429389269dd912417e8daff848/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 68738cf4a463781c1aa42200de63210bc4b9edbe..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:03:41_5347d00092c4f2429389269dd912417e8daff848/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-17_15:03:41_5347d00092c4f2429389269dd912417e8daff848/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-17_15:03:41_5347d00092c4f2429389269dd912417e8daff848/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-08-17_15:03:41_5347d00092c4f2429389269dd912417e8daff848/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:03:41_5347d00092c4f2429389269dd912417e8daff848/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-17_15:03:41_5347d00092c4f2429389269dd912417e8daff848/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-17_15:03:41_5347d00092c4f2429389269dd912417e8daff848/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 819b1876e26fdd758df23050ecd02bd8d430c90c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:03:41_5347d00092c4f2429389269dd912417e8daff848/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_15:03:41_5347d00092c4f2429389269dd912417e8daff848/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-17_15:03:41_5347d00092c4f2429389269dd912417e8daff848/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index e8cd2b3b95a6b970ce542ecfce44150ea932f776..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:03:41_5347d00092c4f2429389269dd912417e8daff848/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.099648,0.00433,924.0 diff --git a/raw_results/2023-08-17_15:03:41_5347d00092c4f2429389269dd912417e8daff848/pytorch_bert_inference/1/main.log b/raw_results/2023-08-17_15:03:41_5347d00092c4f2429389269dd912417e8daff848/pytorch_bert_inference/1/main.log deleted file mode 100644 index e05e4b53f17dac72a6304fc0725651b4933c6d96..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-17_15:03:41_5347d00092c4f2429389269dd912417e8daff848/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-17 16:51:36,078][benchmark][INFO] - Configuring inference benchmark -[2023-08-17 16:51:36,079][benchmark][INFO] - + Setting seed(42) -[2023-08-17 16:51:36,527][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-17 16:51:36,528][backend][INFO] - Configuring pytorch backend -[2023-08-17 16:51:36,528][backend][INFO] - + Checking initial device isolation -[2023-08-17 16:51:36,528][backend][INFO] - + Checking contineous device isolation -[2023-08-17 16:51:36,528][pytorch][INFO] - + Disabling gradients -[2023-08-17 16:51:36,528][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-17 16:51:36,648][pytorch][INFO] - + Turning on eval mode -[2023-08-17 16:51:36,649][inference][INFO] - Running inference benchmark -[2023-08-17 16:51:36,768][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-17 16:51:36,769][inference][INFO] - + Tracking forward pass peak memory -[2023-08-17 16:51:36,810][inference][INFO] - + Forward pass peak memory: 467.099648 (MB) -[2023-08-17 16:51:36,811][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-17 16:51:36,813][inference][INFO] - + Warming up the forward pass -[2023-08-17 16:51:36,856][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-17 16:51:41,895][inference][INFO] - + Forward pass latency: 4.33e-03 (s) -[2023-08-17 16:51:41,896][inference][INFO] - + Forward pass throughput: 924.00 (samples/s) -[2023-08-17 16:51:41,896][inference][INFO] - Saving inference results -[2023-08-17 16:51:41,904][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-17_15:03:41_5347d00092c4f2429389269dd912417e8daff848/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-17_15:03:41_5347d00092c4f2429389269dd912417e8daff848/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 39258ef78f16622f3cbf970c22d0b92229e7cc16..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:03:41_5347d00092c4f2429389269dd912417e8daff848/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: 
hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_15:03:41_5347d00092c4f2429389269dd912417e8daff848/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-17_15:03:41_5347d00092c4f2429389269dd912417e8daff848/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index d6487e9a12f9f8dacf6b3b46756801a5db20348f..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:03:41_5347d00092c4f2429389269dd912417e8daff848/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-17_15:03:41_5347d00092c4f2429389269dd912417e8daff848/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-17_15:03:41_5347d00092c4f2429389269dd912417e8daff848/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-17_15:03:41_5347d00092c4f2429389269dd912417e8daff848/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:03:41_5347d00092c4f2429389269dd912417e8daff848/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-17_15:03:41_5347d00092c4f2429389269dd912417e8daff848/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-17_15:03:41_5347d00092c4f2429389269dd912417e8daff848/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index cb6bbe7e2787ba735853eaeafaa72ec86761d726..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:03:41_5347d00092c4f2429389269dd912417e8daff848/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_15:03:41_5347d00092c4f2429389269dd912417e8daff848/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-17_15:03:41_5347d00092c4f2429389269dd912417e8daff848/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index a934802544e9c037a5be415d194b353e63c7bfbe..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:03:41_5347d00092c4f2429389269dd912417e8daff848/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.26643199999995,0.00376,266.0,0.519,193.0 diff --git a/raw_results/2023-08-17_15:03:41_5347d00092c4f2429389269dd912417e8daff848/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-17_15:03:41_5347d00092c4f2429389269dd912417e8daff848/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index a27fac7ae844ed359bc5de067a7750582ffafbca..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:03:41_5347d00092c4f2429389269dd912417e8daff848/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-17 16:51:46,621][benchmark][INFO] - Configuring inference benchmark -[2023-08-17 16:51:46,622][benchmark][INFO] - + Setting seed(42) -[2023-08-17 16:51:48,038][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-17 16:51:48,038][backend][INFO] - Configuring pytorch backend -[2023-08-17 16:51:48,038][backend][INFO] - + Checking initial device isolation -[2023-08-17 16:51:48,038][backend][INFO] - + Checking contineous device isolation -[2023-08-17 16:51:48,038][pytorch][INFO] - + Disabling gradients -[2023-08-17 16:51:48,039][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-17 16:51:48,901][pytorch][INFO] - + Turning on eval mode -[2023-08-17 16:51:48,901][inference][INFO] - Running inference benchmark -[2023-08-17 16:51:49,090][inference][INFO] - + Tracking forward pass peak memory -[2023-08-17 16:51:49,134][inference][INFO] - + Forward pass peak memory: 469.26643199999995 (MB) -[2023-08-17 16:51:49,136][inference][INFO] - + Warming up the 
forward pass -[2023-08-17 16:51:49,166][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-17 16:51:54,209][inference][INFO] - + Forward pass latency: 3.76e-03 (s) -[2023-08-17 16:51:54,210][inference][INFO] - + Forward pass throughput: 266.00 (samples/s) -[2023-08-17 16:51:54,211][inference][INFO] - + Warming up the generation pass -[2023-08-17 16:51:54,795][inference][INFO] - + Tracking generation latency and throughput -[2023-08-17 16:51:59,988][inference][INFO] - + Generation pass latency: 5.19e-01 (s) -[2023-08-17 16:51:59,989][inference][INFO] - + Generation pass throughput: 193.00 (tokens/s) -[2023-08-17 16:51:59,989][inference][INFO] - Saving inference results -[2023-08-17 16:52:00,000][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-17_15:08:05_b4d554880013bf97718e1e1332715eeaba7dee17/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-17_15:08:05_b4d554880013bf97718e1e1332715eeaba7dee17/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index 8555d6744dd0e5881f14d8db6ba7f34fd0488b3b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:08:05_b4d554880013bf97718e1e1332715eeaba7dee17/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_15:08:05_b4d554880013bf97718e1e1332715eeaba7dee17/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-17_15:08:05_b4d554880013bf97718e1e1332715eeaba7dee17/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 9287e19cc5678fe800c761e6b4a2789967d1b17d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:08:05_b4d554880013bf97718e1e1332715eeaba7dee17/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-17_15:08:05_b4d554880013bf97718e1e1332715eeaba7dee17/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-17_15:08:05_b4d554880013bf97718e1e1332715eeaba7dee17/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-17_15:08:05_b4d554880013bf97718e1e1332715eeaba7dee17/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:08:05_b4d554880013bf97718e1e1332715eeaba7dee17/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-17_15:08:05_b4d554880013bf97718e1e1332715eeaba7dee17/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-17_15:08:05_b4d554880013bf97718e1e1332715eeaba7dee17/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 8748d22fe3c2d5758d4d82aa74516bd5a24d74c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:08:05_b4d554880013bf97718e1e1332715eeaba7dee17/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_15:08:05_b4d554880013bf97718e1e1332715eeaba7dee17/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-17_15:08:05_b4d554880013bf97718e1e1332715eeaba7dee17/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index eaf53038f0aff919d5f027dec81014f6947ccd55..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:08:05_b4d554880013bf97718e1e1332715eeaba7dee17/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.41152,0.00372,269.0 diff --git a/raw_results/2023-08-17_15:08:05_b4d554880013bf97718e1e1332715eeaba7dee17/pytorch_bert_inference/0/main.log b/raw_results/2023-08-17_15:08:05_b4d554880013bf97718e1e1332715eeaba7dee17/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
b012abf7524fb827cd76950e9b6559d46ce58138..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:08:05_b4d554880013bf97718e1e1332715eeaba7dee17/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-17 16:53:02,261][benchmark][INFO] - Configuring inference benchmark -[2023-08-17 16:53:02,262][benchmark][INFO] - + Setting seed(42) -[2023-08-17 16:53:03,715][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-17 16:53:03,716][backend][INFO] - Configuring pytorch backend -[2023-08-17 16:53:03,716][backend][INFO] - + Checking initial device isolation -[2023-08-17 16:53:03,716][backend][INFO] - + Checking contineous device isolation -[2023-08-17 16:53:03,716][pytorch][INFO] - + Disabling gradients -[2023-08-17 16:53:03,716][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-17 16:53:04,373][pytorch][INFO] - + Turning on eval mode -[2023-08-17 16:53:04,373][inference][INFO] - Running inference benchmark -[2023-08-17 16:53:04,496][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-17 16:53:04,497][inference][INFO] - + Tracking forward pass peak memory -[2023-08-17 16:53:04,558][inference][INFO] - + Forward pass peak memory: 466.41152 (MB) -[2023-08-17 16:53:04,559][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-17 16:53:04,561][inference][INFO] - + Warming up the forward pass -[2023-08-17 16:53:04,598][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-17 16:53:09,643][inference][INFO] - + Forward pass latency: 3.72e-03 (s) -[2023-08-17 16:53:09,644][inference][INFO] - + Forward pass throughput: 269.00 (samples/s) -[2023-08-17 16:53:09,645][inference][INFO] - Saving inference results -[2023-08-17 16:53:09,656][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-17_15:08:05_b4d554880013bf97718e1e1332715eeaba7dee17/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-17_15:08:05_b4d554880013bf97718e1e1332715eeaba7dee17/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index fc02352dc97d220d55fe04bc4edef4200110f22a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:08:05_b4d554880013bf97718e1e1332715eeaba7dee17/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_15:08:05_b4d554880013bf97718e1e1332715eeaba7dee17/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-17_15:08:05_b4d554880013bf97718e1e1332715eeaba7dee17/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 6cdba6f375af1f1c1ebbd5cf4e60352be5b3fd1c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:08:05_b4d554880013bf97718e1e1332715eeaba7dee17/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-17_15:08:05_b4d554880013bf97718e1e1332715eeaba7dee17/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-17_15:08:05_b4d554880013bf97718e1e1332715eeaba7dee17/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-17_15:08:05_b4d554880013bf97718e1e1332715eeaba7dee17/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:08:05_b4d554880013bf97718e1e1332715eeaba7dee17/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-17_15:08:05_b4d554880013bf97718e1e1332715eeaba7dee17/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-17_15:08:05_b4d554880013bf97718e1e1332715eeaba7dee17/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 819b1876e26fdd758df23050ecd02bd8d430c90c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:08:05_b4d554880013bf97718e1e1332715eeaba7dee17/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_15:08:05_b4d554880013bf97718e1e1332715eeaba7dee17/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-17_15:08:05_b4d554880013bf97718e1e1332715eeaba7dee17/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index d7e852444043c3c64b547432c44b44f316cecd88..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:08:05_b4d554880013bf97718e1e1332715eeaba7dee17/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.431424,0.0041,976.0 diff --git a/raw_results/2023-08-17_15:08:05_b4d554880013bf97718e1e1332715eeaba7dee17/pytorch_bert_inference/1/main.log b/raw_results/2023-08-17_15:08:05_b4d554880013bf97718e1e1332715eeaba7dee17/pytorch_bert_inference/1/main.log deleted file mode 100644 index c6ad16e5162a927a14135d51317a702a0999deef..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:08:05_b4d554880013bf97718e1e1332715eeaba7dee17/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-17 16:53:10,040][benchmark][INFO] - Configuring inference benchmark -[2023-08-17 16:53:10,041][benchmark][INFO] - + Setting seed(42) -[2023-08-17 16:53:10,480][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-17 16:53:10,480][backend][INFO] - Configuring pytorch backend -[2023-08-17 16:53:10,480][backend][INFO] - + Checking initial device isolation -[2023-08-17 16:53:10,481][backend][INFO] - + Checking contineous device isolation -[2023-08-17 16:53:10,481][pytorch][INFO] - + Disabling gradients -[2023-08-17 16:53:10,481][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-17 16:53:10,713][pytorch][INFO] - + Turning on eval mode -[2023-08-17 16:53:10,713][inference][INFO] - Running inference benchmark -[2023-08-17 16:53:10,844][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-17 16:53:10,846][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-17 16:53:10,886][inference][INFO] - + Forward pass peak memory: 467.431424 (MB) -[2023-08-17 16:53:10,887][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-17 16:53:10,889][inference][INFO] - + Warming up the forward pass -[2023-08-17 16:53:10,930][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-17 16:53:15,974][inference][INFO] - + Forward pass latency: 4.10e-03 (s) -[2023-08-17 16:53:15,975][inference][INFO] - + Forward pass throughput: 976.00 (samples/s) -[2023-08-17 16:53:15,975][inference][INFO] - Saving inference results -[2023-08-17 16:53:15,984][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-17_15:08:05_b4d554880013bf97718e1e1332715eeaba7dee17/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-17_15:08:05_b4d554880013bf97718e1e1332715eeaba7dee17/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 39258ef78f16622f3cbf970c22d0b92229e7cc16..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:08:05_b4d554880013bf97718e1e1332715eeaba7dee17/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_15:08:05_b4d554880013bf97718e1e1332715eeaba7dee17/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-17_15:08:05_b4d554880013bf97718e1e1332715eeaba7dee17/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 32bec673d8004bbc62e4341c380378ff8a9467de..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:08:05_b4d554880013bf97718e1e1332715eeaba7dee17/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-17_15:08:05_b4d554880013bf97718e1e1332715eeaba7dee17/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-17_15:08:05_b4d554880013bf97718e1e1332715eeaba7dee17/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-17_15:08:05_b4d554880013bf97718e1e1332715eeaba7dee17/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:08:05_b4d554880013bf97718e1e1332715eeaba7dee17/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-17_15:08:05_b4d554880013bf97718e1e1332715eeaba7dee17/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-17_15:08:05_b4d554880013bf97718e1e1332715eeaba7dee17/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index cb6bbe7e2787ba735853eaeafaa72ec86761d726..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:08:05_b4d554880013bf97718e1e1332715eeaba7dee17/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_15:08:05_b4d554880013bf97718e1e1332715eeaba7dee17/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-17_15:08:05_b4d554880013bf97718e1e1332715eeaba7dee17/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 0e7ae7cd558788192fecb00370898ed415832e32..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:08:05_b4d554880013bf97718e1e1332715eeaba7dee17/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.31968,0.00378,265.0,0.484,207.0 diff --git a/raw_results/2023-08-17_15:08:05_b4d554880013bf97718e1e1332715eeaba7dee17/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-17_15:08:05_b4d554880013bf97718e1e1332715eeaba7dee17/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 378494e9841e325902815c53454e8647dc817653..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-17_15:08:05_b4d554880013bf97718e1e1332715eeaba7dee17/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-17 16:53:20,853][benchmark][INFO] - Configuring inference benchmark -[2023-08-17 16:53:20,853][benchmark][INFO] - + Setting seed(42) -[2023-08-17 16:53:22,311][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-17 16:53:22,311][backend][INFO] - Configuring pytorch backend -[2023-08-17 16:53:22,312][backend][INFO] - + Checking initial device isolation -[2023-08-17 16:53:22,312][backend][INFO] - + Checking contineous device isolation -[2023-08-17 16:53:22,312][pytorch][INFO] - + Disabling gradients -[2023-08-17 16:53:22,312][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-17 16:53:22,957][pytorch][INFO] - + Turning on eval mode -[2023-08-17 16:53:22,958][inference][INFO] - Running inference benchmark -[2023-08-17 16:53:23,153][inference][INFO] - + Tracking forward pass peak memory -[2023-08-17 16:53:23,196][inference][INFO] - + Forward pass peak memory: 469.31968 (MB) -[2023-08-17 16:53:23,198][inference][INFO] - + Warming up the forward pass -[2023-08-17 16:53:23,228][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-17 16:53:28,274][inference][INFO] - + Forward pass latency: 3.78e-03 (s) -[2023-08-17 16:53:28,276][inference][INFO] - + Forward pass throughput: 265.00 (samples/s) -[2023-08-17 16:53:28,277][inference][INFO] - + Warming up the generation pass -[2023-08-17 16:53:28,831][inference][INFO] - + Tracking generation latency and throughput -[2023-08-17 16:53:34,159][inference][INFO] - + Generation pass latency: 4.84e-01 (s) -[2023-08-17 16:53:34,160][inference][INFO] - + Generation pass throughput: 207.00 (tokens/s) -[2023-08-17 16:53:34,160][inference][INFO] - Saving inference results -[2023-08-17 16:53:34,173][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-17_15:19:54_9264fc915a3295c6fd0e05f54ee409917ac43f60/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-17_15:19:54_9264fc915a3295c6fd0e05f54ee409917ac43f60/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index 8555d6744dd0e5881f14d8db6ba7f34fd0488b3b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:19:54_9264fc915a3295c6fd0e05f54ee409917ac43f60/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 
16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_15:19:54_9264fc915a3295c6fd0e05f54ee409917ac43f60/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-17_15:19:54_9264fc915a3295c6fd0e05f54ee409917ac43f60/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 8d8e0d94678bca55dadebc614259a70d063b8e48..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:19:54_9264fc915a3295c6fd0e05f54ee409917ac43f60/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
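The sweeper block in the hydra.yaml dump above (params: benchmark.input_shapes.batch_size: 1,4) is what appears to produce the numbered job directories 0 and 1 seen throughout these results. A minimal sketch of that expansion, assuming Hydra BasicSweeper semantics of splitting comma-separated values into one job per value; expand_sweep below is a hypothetical helper for illustration, not an optimum-benchmark or Hydra API:

from itertools import product

# Illustrative BasicSweeper-style expansion of comma-separated sweep values.
def expand_sweep(params):
    keys = list(params)
    values = [str(params[k]).split(",") for k in keys]
    return [dict(zip(keys, combo)) for combo in product(*values)]

for num, overrides in enumerate(expand_sweep({"benchmark.input_shapes.batch_size": "1,4"})):
    # hydra.sweep.subdir is ${hydra.job.num}: job 0 -> batch_size=1, job 1 -> batch_size=4
    print(num, [f"{k}={v}" for k, v in overrides.items()])

This matches the overrides.yaml files in these dumps: job 0 records "- benchmark.input_shapes.batch_size=1" and job 1 records "- benchmark.input_shapes.batch_size=4".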
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-17_15:19:54_9264fc915a3295c6fd0e05f54ee409917ac43f60/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-17_15:19:54_9264fc915a3295c6fd0e05f54ee409917ac43f60/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-17_15:19:54_9264fc915a3295c6fd0e05f54ee409917ac43f60/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:19:54_9264fc915a3295c6fd0e05f54ee409917ac43f60/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-17_15:19:54_9264fc915a3295c6fd0e05f54ee409917ac43f60/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-17_15:19:54_9264fc915a3295c6fd0e05f54ee409917ac43f60/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 8748d22fe3c2d5758d4d82aa74516bd5a24d74c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:19:54_9264fc915a3295c6fd0e05f54ee409917ac43f60/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_15:19:54_9264fc915a3295c6fd0e05f54ee409917ac43f60/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-17_15:19:54_9264fc915a3295c6fd0e05f54ee409917ac43f60/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index c22a385258620d6658a8c6ab310b5d14486500ce..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:19:54_9264fc915a3295c6fd0e05f54ee409917ac43f60/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.140608,0.00321,312.0 diff --git a/raw_results/2023-08-17_15:19:54_9264fc915a3295c6fd0e05f54ee409917ac43f60/pytorch_bert_inference/0/main.log b/raw_results/2023-08-17_15:19:54_9264fc915a3295c6fd0e05f54ee409917ac43f60/pytorch_bert_inference/0/main.log deleted file mode 100644 index 8df0759ad765d43770ee9bb1f1f9b2071894ae3e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:19:54_9264fc915a3295c6fd0e05f54ee409917ac43f60/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-17 16:54:36,675][benchmark][INFO] - Configuring inference benchmark -[2023-08-17 16:54:36,676][benchmark][INFO] - + Setting seed(42) -[2023-08-17 16:54:37,933][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-17 16:54:37,933][backend][INFO] - Configuring pytorch backend -[2023-08-17 16:54:37,933][backend][INFO] - + Checking initial device isolation -[2023-08-17 16:54:37,934][backend][INFO] - + Checking contineous device isolation -[2023-08-17 16:54:37,934][pytorch][INFO] - + Disabling gradients -[2023-08-17 16:54:37,934][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-17 16:54:38,546][pytorch][INFO] - + Turning on eval mode -[2023-08-17 16:54:38,547][inference][INFO] - Running inference benchmark -[2023-08-17 16:54:38,665][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-17 16:54:38,667][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-17 16:54:38,725][inference][INFO] - + Forward pass peak memory: 467.140608 (MB) -[2023-08-17 16:54:38,726][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-17 16:54:38,727][inference][INFO] - + Warming up the forward pass -[2023-08-17 16:54:38,762][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-17 16:54:43,814][inference][INFO] - + Forward pass latency: 3.21e-03 (s) -[2023-08-17 16:54:43,815][inference][INFO] - + Forward pass throughput: 312.00 (samples/s) -[2023-08-17 16:54:43,815][inference][INFO] - Saving inference results -[2023-08-17 16:54:43,825][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-17_15:19:54_9264fc915a3295c6fd0e05f54ee409917ac43f60/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-17_15:19:54_9264fc915a3295c6fd0e05f54ee409917ac43f60/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index fc02352dc97d220d55fe04bc4edef4200110f22a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:19:54_9264fc915a3295c6fd0e05f54ee409917ac43f60/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_15:19:54_9264fc915a3295c6fd0e05f54ee409917ac43f60/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-17_15:19:54_9264fc915a3295c6fd0e05f54ee409917ac43f60/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index e88318d8be8f1a7c2526268c34492d53029635f6..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:19:54_9264fc915a3295c6fd0e05f54ee409917ac43f60/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-17_15:19:54_9264fc915a3295c6fd0e05f54ee409917ac43f60/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-17_15:19:54_9264fc915a3295c6fd0e05f54ee409917ac43f60/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-08-17_15:19:54_9264fc915a3295c6fd0e05f54ee409917ac43f60/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:19:54_9264fc915a3295c6fd0e05f54ee409917ac43f60/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-17_15:19:54_9264fc915a3295c6fd0e05f54ee409917ac43f60/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-17_15:19:54_9264fc915a3295c6fd0e05f54ee409917ac43f60/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 819b1876e26fdd758df23050ecd02bd8d430c90c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:19:54_9264fc915a3295c6fd0e05f54ee409917ac43f60/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_15:19:54_9264fc915a3295c6fd0e05f54ee409917ac43f60/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-17_15:19:54_9264fc915a3295c6fd0e05f54ee409917ac43f60/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index eb2ab37ebcfec95806fe86be9d27cbe4d7d9f59a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:19:54_9264fc915a3295c6fd0e05f54ee409917ac43f60/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.267008,0.00363,1100.0 diff --git a/raw_results/2023-08-17_15:19:54_9264fc915a3295c6fd0e05f54ee409917ac43f60/pytorch_bert_inference/1/main.log b/raw_results/2023-08-17_15:19:54_9264fc915a3295c6fd0e05f54ee409917ac43f60/pytorch_bert_inference/1/main.log deleted file mode 100644 index 177416f4313d888f37b7c08b1fc8e26b89b040f4..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-17_15:19:54_9264fc915a3295c6fd0e05f54ee409917ac43f60/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-17 16:54:44,196][benchmark][INFO] - Configuring inference benchmark -[2023-08-17 16:54:44,197][benchmark][INFO] - + Setting seed(42) -[2023-08-17 16:54:44,772][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-17 16:54:44,772][backend][INFO] - Configuring pytorch backend -[2023-08-17 16:54:44,772][backend][INFO] - + Checking initial device isolation -[2023-08-17 16:54:44,772][backend][INFO] - + Checking contineous device isolation -[2023-08-17 16:54:44,772][pytorch][INFO] - + Disabling gradients -[2023-08-17 16:54:44,773][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-17 16:54:44,889][pytorch][INFO] - + Turning on eval mode -[2023-08-17 16:54:44,889][inference][INFO] - Running inference benchmark -[2023-08-17 16:54:45,015][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-17 16:54:45,016][inference][INFO] - + Tracking forward pass peak memory -[2023-08-17 16:54:45,057][inference][INFO] - + Forward pass peak memory: 468.267008 (MB) -[2023-08-17 16:54:45,058][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-17 16:54:45,060][inference][INFO] - + Warming up the forward pass -[2023-08-17 16:54:45,111][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-17 16:54:50,155][inference][INFO] - + Forward pass latency: 3.63e-03 (s) -[2023-08-17 16:54:50,156][inference][INFO] - + Forward pass throughput: 1100.00 (samples/s) -[2023-08-17 16:54:50,156][inference][INFO] - Saving inference results -[2023-08-17 16:54:50,164][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-17_15:19:54_9264fc915a3295c6fd0e05f54ee409917ac43f60/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-17_15:19:54_9264fc915a3295c6fd0e05f54ee409917ac43f60/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 39258ef78f16622f3cbf970c22d0b92229e7cc16..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:19:54_9264fc915a3295c6fd0e05f54ee409917ac43f60/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: 
hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_15:19:54_9264fc915a3295c6fd0e05f54ee409917ac43f60/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-17_15:19:54_9264fc915a3295c6fd0e05f54ee409917ac43f60/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index e924fd4683d8624b97c93114fa1e2c7626c48e25..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:19:54_9264fc915a3295c6fd0e05f54ee409917ac43f60/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
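The inference_results.csv rows in these runs look internally consistent with throughput being derived from latency and the configured shapes: forward throughput ≈ batch_size / forward latency, and generation throughput ≈ new_tokens / generation latency. A quick check against the pytorch_gpt2_inference row above (batch_size 1, new_tokens 100), as a sketch only:

import csv, io

# Row copied verbatim from one of the deleted inference_results.csv files above.
CSV = """,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
0,469.31968,0.00378,265.0,0.484,207.0"""

row = next(csv.DictReader(io.StringIO(CSV)))
batch_size, new_tokens = 1, 100  # from the matching hydra_config.yaml

print(round(batch_size / float(row["forward.latency(s)"])))   # 265, matches the CSV
print(round(new_tokens / float(row["generate.latency(s)"])))  # 207, matches the CSV

The same relation holds for the bert rows, e.g. 4 / 0.00363 ≈ 1100 samples/s at batch size 4.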
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-17_15:19:54_9264fc915a3295c6fd0e05f54ee409917ac43f60/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-17_15:19:54_9264fc915a3295c6fd0e05f54ee409917ac43f60/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-17_15:19:54_9264fc915a3295c6fd0e05f54ee409917ac43f60/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:19:54_9264fc915a3295c6fd0e05f54ee409917ac43f60/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-17_15:19:54_9264fc915a3295c6fd0e05f54ee409917ac43f60/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-17_15:19:54_9264fc915a3295c6fd0e05f54ee409917ac43f60/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index cb6bbe7e2787ba735853eaeafaa72ec86761d726..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:19:54_9264fc915a3295c6fd0e05f54ee409917ac43f60/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_15:19:54_9264fc915a3295c6fd0e05f54ee409917ac43f60/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-17_15:19:54_9264fc915a3295c6fd0e05f54ee409917ac43f60/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 528c272be9c585b92b147fe5625add3ab1fdf31d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:19:54_9264fc915a3295c6fd0e05f54ee409917ac43f60/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,468.82406399999996,0.00315,317.0,0.485,206.0 diff --git a/raw_results/2023-08-17_15:19:54_9264fc915a3295c6fd0e05f54ee409917ac43f60/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-17_15:19:54_9264fc915a3295c6fd0e05f54ee409917ac43f60/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index e91e6e38327739b2e90d3e1cda01496618da3302..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:19:54_9264fc915a3295c6fd0e05f54ee409917ac43f60/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-17 16:54:54,887][benchmark][INFO] - Configuring inference benchmark -[2023-08-17 16:54:54,887][benchmark][INFO] - + Setting seed(42) -[2023-08-17 16:54:56,364][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-17 16:54:56,364][backend][INFO] - Configuring pytorch backend -[2023-08-17 16:54:56,364][backend][INFO] - + Checking initial device isolation -[2023-08-17 16:54:56,364][backend][INFO] - + Checking contineous device isolation -[2023-08-17 16:54:56,364][pytorch][INFO] - + Disabling gradients -[2023-08-17 16:54:56,365][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-17 16:54:56,999][pytorch][INFO] - + Turning on eval mode -[2023-08-17 16:54:57,000][inference][INFO] - Running inference benchmark -[2023-08-17 16:54:57,206][inference][INFO] - + Tracking forward pass peak memory -[2023-08-17 16:54:57,246][inference][INFO] - + Forward pass peak memory: 468.82406399999996 (MB) -[2023-08-17 16:54:57,247][inference][INFO] - + Warming up the 
forward pass -[2023-08-17 16:54:57,278][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-17 16:55:02,330][inference][INFO] - + Forward pass latency: 3.15e-03 (s) -[2023-08-17 16:55:02,331][inference][INFO] - + Forward pass throughput: 317.00 (samples/s) -[2023-08-17 16:55:02,331][inference][INFO] - + Warming up the generation pass -[2023-08-17 16:55:02,817][inference][INFO] - + Tracking generation latency and throughput -[2023-08-17 16:55:08,150][inference][INFO] - + Generation pass latency: 4.85e-01 (s) -[2023-08-17 16:55:08,151][inference][INFO] - + Generation pass throughput: 206.00 (tokens/s) -[2023-08-17 16:55:08,151][inference][INFO] - Saving inference results -[2023-08-17 16:55:08,163][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-17_15:21:56_181d778f83bf6e58c1d69a7599afb2bb9ceff21e/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-17_15:21:56_181d778f83bf6e58c1d69a7599afb2bb9ceff21e/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index 8555d6744dd0e5881f14d8db6ba7f34fd0488b3b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:21:56_181d778f83bf6e58c1d69a7599afb2bb9ceff21e/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_15:21:56_181d778f83bf6e58c1d69a7599afb2bb9ceff21e/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-17_15:21:56_181d778f83bf6e58c1d69a7599afb2bb9ceff21e/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 231a1afcaa573b1f1b67fcd83b8c3f1f5500c006..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:21:56_181d778f83bf6e58c1d69a7599afb2bb9ceff21e/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-17_15:21:56_181d778f83bf6e58c1d69a7599afb2bb9ceff21e/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-17_15:21:56_181d778f83bf6e58c1d69a7599afb2bb9ceff21e/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-17_15:21:56_181d778f83bf6e58c1d69a7599afb2bb9ceff21e/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:21:56_181d778f83bf6e58c1d69a7599afb2bb9ceff21e/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-17_15:21:56_181d778f83bf6e58c1d69a7599afb2bb9ceff21e/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-17_15:21:56_181d778f83bf6e58c1d69a7599afb2bb9ceff21e/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 8748d22fe3c2d5758d4d82aa74516bd5a24d74c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:21:56_181d778f83bf6e58c1d69a7599afb2bb9ceff21e/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_15:21:56_181d778f83bf6e58c1d69a7599afb2bb9ceff21e/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-17_15:21:56_181d778f83bf6e58c1d69a7599afb2bb9ceff21e/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index aeb171079e2cf6688ffa01a2da792c6802086020..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:21:56_181d778f83bf6e58c1d69a7599afb2bb9ceff21e/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.873792,0.0032,312.0 diff --git a/raw_results/2023-08-17_15:21:56_181d778f83bf6e58c1d69a7599afb2bb9ceff21e/pytorch_bert_inference/0/main.log b/raw_results/2023-08-17_15:21:56_181d778f83bf6e58c1d69a7599afb2bb9ceff21e/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
130bd022a2d2904b575ab64748b4d6c3eef7d231..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:21:56_181d778f83bf6e58c1d69a7599afb2bb9ceff21e/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-17 16:56:10,282][benchmark][INFO] - Configuring inference benchmark -[2023-08-17 16:56:10,283][benchmark][INFO] - + Setting seed(42) -[2023-08-17 16:56:11,520][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-17 16:56:11,520][backend][INFO] - Configuring pytorch backend -[2023-08-17 16:56:11,520][backend][INFO] - + Checking initial device isolation -[2023-08-17 16:56:11,521][backend][INFO] - + Checking contineous device isolation -[2023-08-17 16:56:11,521][pytorch][INFO] - + Disabling gradients -[2023-08-17 16:56:11,521][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-17 16:56:12,136][pytorch][INFO] - + Turning on eval mode -[2023-08-17 16:56:12,137][inference][INFO] - Running inference benchmark -[2023-08-17 16:56:12,261][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-17 16:56:12,262][inference][INFO] - + Tracking forward pass peak memory -[2023-08-17 16:56:12,320][inference][INFO] - + Forward pass peak memory: 467.873792 (MB) -[2023-08-17 16:56:12,321][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-17 16:56:12,323][inference][INFO] - + Warming up the forward pass -[2023-08-17 16:56:12,356][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-17 16:56:17,404][inference][INFO] - + Forward pass latency: 3.20e-03 (s) -[2023-08-17 16:56:17,406][inference][INFO] - + Forward pass throughput: 312.00 (samples/s) -[2023-08-17 16:56:17,406][inference][INFO] - Saving inference results -[2023-08-17 16:56:17,418][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-17_15:21:56_181d778f83bf6e58c1d69a7599afb2bb9ceff21e/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-17_15:21:56_181d778f83bf6e58c1d69a7599afb2bb9ceff21e/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index fc02352dc97d220d55fe04bc4edef4200110f22a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:21:56_181d778f83bf6e58c1d69a7599afb2bb9ceff21e/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_15:21:56_181d778f83bf6e58c1d69a7599afb2bb9ceff21e/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-17_15:21:56_181d778f83bf6e58c1d69a7599afb2bb9ceff21e/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 2d686b1c539cc020e3d3cda3ef3fe73648b419a0..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:21:56_181d778f83bf6e58c1d69a7599afb2bb9ceff21e/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-17_15:21:56_181d778f83bf6e58c1d69a7599afb2bb9ceff21e/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-17_15:21:56_181d778f83bf6e58c1d69a7599afb2bb9ceff21e/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-17_15:21:56_181d778f83bf6e58c1d69a7599afb2bb9ceff21e/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:21:56_181d778f83bf6e58c1d69a7599afb2bb9ceff21e/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-17_15:21:56_181d778f83bf6e58c1d69a7599afb2bb9ceff21e/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-17_15:21:56_181d778f83bf6e58c1d69a7599afb2bb9ceff21e/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 819b1876e26fdd758df23050ecd02bd8d430c90c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:21:56_181d778f83bf6e58c1d69a7599afb2bb9ceff21e/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_15:21:56_181d778f83bf6e58c1d69a7599afb2bb9ceff21e/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-17_15:21:56_181d778f83bf6e58c1d69a7599afb2bb9ceff21e/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index b0a723987f286c5f745446cbe13bfe77cd79e2c3..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:21:56_181d778f83bf6e58c1d69a7599afb2bb9ceff21e/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.98380799999995,0.00355,1130.0 diff --git a/raw_results/2023-08-17_15:21:56_181d778f83bf6e58c1d69a7599afb2bb9ceff21e/pytorch_bert_inference/1/main.log b/raw_results/2023-08-17_15:21:56_181d778f83bf6e58c1d69a7599afb2bb9ceff21e/pytorch_bert_inference/1/main.log deleted file mode 100644 index 7dab15782554c1ab24403520d3f3288924cd6be2..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:21:56_181d778f83bf6e58c1d69a7599afb2bb9ceff21e/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-17 16:56:17,793][benchmark][INFO] - Configuring inference benchmark -[2023-08-17 16:56:17,793][benchmark][INFO] - + Setting seed(42) -[2023-08-17 16:56:18,242][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-17 16:56:18,243][backend][INFO] - Configuring pytorch backend -[2023-08-17 16:56:18,243][backend][INFO] - + Checking initial device isolation -[2023-08-17 16:56:18,243][backend][INFO] - + Checking contineous device isolation -[2023-08-17 16:56:18,243][pytorch][INFO] - + Disabling gradients -[2023-08-17 16:56:18,244][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-17 16:56:18,367][pytorch][INFO] - + Turning on eval mode -[2023-08-17 16:56:18,368][inference][INFO] - Running inference benchmark -[2023-08-17 16:56:18,491][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-17 16:56:18,492][inference][INFO] - + Tracking forward 
pass peak memory -[2023-08-17 16:56:18,534][inference][INFO] - + Forward pass peak memory: 468.98380799999995 (MB) -[2023-08-17 16:56:18,535][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-17 16:56:18,537][inference][INFO] - + Warming up the forward pass -[2023-08-17 16:56:18,574][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-17 16:56:23,619][inference][INFO] - + Forward pass latency: 3.55e-03 (s) -[2023-08-17 16:56:23,620][inference][INFO] - + Forward pass throughput: 1130.00 (samples/s) -[2023-08-17 16:56:23,620][inference][INFO] - Saving inference results -[2023-08-17 16:56:23,629][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-17_15:21:56_181d778f83bf6e58c1d69a7599afb2bb9ceff21e/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-17_15:21:56_181d778f83bf6e58c1d69a7599afb2bb9ceff21e/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 39258ef78f16622f3cbf970c22d0b92229e7cc16..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:21:56_181d778f83bf6e58c1d69a7599afb2bb9ceff21e/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_15:21:56_181d778f83bf6e58c1d69a7599afb2bb9ceff21e/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-17_15:21:56_181d778f83bf6e58c1d69a7599afb2bb9ceff21e/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index c9de8f9dffbd859e6b0b5cfcffe78b024ed52a37..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:21:56_181d778f83bf6e58c1d69a7599afb2bb9ceff21e/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - 
launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-17_15:21:56_181d778f83bf6e58c1d69a7599afb2bb9ceff21e/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-17_15:21:56_181d778f83bf6e58c1d69a7599afb2bb9ceff21e/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-17_15:21:56_181d778f83bf6e58c1d69a7599afb2bb9ceff21e/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:21:56_181d778f83bf6e58c1d69a7599afb2bb9ceff21e/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-17_15:21:56_181d778f83bf6e58c1d69a7599afb2bb9ceff21e/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-17_15:21:56_181d778f83bf6e58c1d69a7599afb2bb9ceff21e/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index cb6bbe7e2787ba735853eaeafaa72ec86761d726..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:21:56_181d778f83bf6e58c1d69a7599afb2bb9ceff21e/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_15:21:56_181d778f83bf6e58c1d69a7599afb2bb9ceff21e/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-17_15:21:56_181d778f83bf6e58c1d69a7599afb2bb9ceff21e/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 02372a393e2bb98388a9eae3393ebecc7c9026e1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:21:56_181d778f83bf6e58c1d69a7599afb2bb9ceff21e/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,468.87731199999996,0.0033,303.0,0.482,207.0 diff --git a/raw_results/2023-08-17_15:21:56_181d778f83bf6e58c1d69a7599afb2bb9ceff21e/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-17_15:21:56_181d778f83bf6e58c1d69a7599afb2bb9ceff21e/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 7aee92a62254d405846b9df421ef3523bfb4596f..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-17_15:21:56_181d778f83bf6e58c1d69a7599afb2bb9ceff21e/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-17 16:56:28,661][benchmark][INFO] - Configuring inference benchmark -[2023-08-17 16:56:28,662][benchmark][INFO] - + Setting seed(42) -[2023-08-17 16:56:30,689][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-17 16:56:30,690][backend][INFO] - Configuring pytorch backend -[2023-08-17 16:56:30,690][backend][INFO] - + Checking initial device isolation -[2023-08-17 16:56:30,690][backend][INFO] - + Checking contineous device isolation -[2023-08-17 16:56:30,690][pytorch][INFO] - + Disabling gradients -[2023-08-17 16:56:30,690][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-17 16:56:31,566][pytorch][INFO] - + Turning on eval mode -[2023-08-17 16:56:31,567][inference][INFO] - Running inference benchmark -[2023-08-17 16:56:31,802][inference][INFO] - + Tracking forward pass peak memory -[2023-08-17 16:56:31,846][inference][INFO] - + Forward pass peak memory: 468.87731199999996 (MB) -[2023-08-17 16:56:31,847][inference][INFO] - + Warming up the forward pass -[2023-08-17 16:56:31,879][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-17 16:56:36,929][inference][INFO] - + Forward pass latency: 3.30e-03 (s) -[2023-08-17 16:56:36,930][inference][INFO] - + Forward pass throughput: 303.00 (samples/s) -[2023-08-17 16:56:36,931][inference][INFO] - + Warming up the generation pass -[2023-08-17 16:56:37,431][inference][INFO] - + Tracking generation latency and throughput -[2023-08-17 16:56:42,736][inference][INFO] - + Generation pass latency: 4.82e-01 (s) -[2023-08-17 16:56:42,737][inference][INFO] - + Generation pass throughput: 207.00 (tokens/s) -[2023-08-17 16:56:42,737][inference][INFO] - Saving inference results -[2023-08-17 16:56:42,749][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-17_15:34:25_d4c0aa1443557981a0690c0593be7b0f6ffd53cf/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-17_15:34:25_d4c0aa1443557981a0690c0593be7b0f6ffd53cf/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index 8555d6744dd0e5881f14d8db6ba7f34fd0488b3b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:34:25_d4c0aa1443557981a0690c0593be7b0f6ffd53cf/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_15:34:25_d4c0aa1443557981a0690c0593be7b0f6ffd53cf/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-17_15:34:25_d4c0aa1443557981a0690c0593be7b0f6ffd53cf/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 91a512039d3e009b0b702a9b4e38f0ee8ae40db4..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:34:25_d4c0aa1443557981a0690c0593be7b0f6ffd53cf/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-17_15:34:25_d4c0aa1443557981a0690c0593be7b0f6ffd53cf/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-17_15:34:25_d4c0aa1443557981a0690c0593be7b0f6ffd53cf/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-17_15:34:25_d4c0aa1443557981a0690c0593be7b0f6ffd53cf/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:34:25_d4c0aa1443557981a0690c0593be7b0f6ffd53cf/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-17_15:34:25_d4c0aa1443557981a0690c0593be7b0f6ffd53cf/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-17_15:34:25_d4c0aa1443557981a0690c0593be7b0f6ffd53cf/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 8748d22fe3c2d5758d4d82aa74516bd5a24d74c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:34:25_d4c0aa1443557981a0690c0593be7b0f6ffd53cf/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_15:34:25_d4c0aa1443557981a0690c0593be7b0f6ffd53cf/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-17_15:34:25_d4c0aa1443557981a0690c0593be7b0f6ffd53cf/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 1463f9b2dbd62b17d60880f0782c621390977b25..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:34:25_d4c0aa1443557981a0690c0593be7b0f6ffd53cf/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.980864,0.0031,323.0 diff --git a/raw_results/2023-08-17_15:34:25_d4c0aa1443557981a0690c0593be7b0f6ffd53cf/pytorch_bert_inference/0/main.log b/raw_results/2023-08-17_15:34:25_d4c0aa1443557981a0690c0593be7b0f6ffd53cf/pytorch_bert_inference/0/main.log deleted file mode 100644 index f64fd670b1101e7b52509785533b8f88889a184c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:34:25_d4c0aa1443557981a0690c0593be7b0f6ffd53cf/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-17 16:57:45,881][benchmark][INFO] - Configuring inference benchmark -[2023-08-17 16:57:45,882][benchmark][INFO] - + Setting seed(42) -[2023-08-17 16:57:47,159][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-17 16:57:47,160][backend][INFO] - Configuring pytorch backend -[2023-08-17 16:57:47,160][backend][INFO] - + Checking initial device isolation -[2023-08-17 16:57:47,160][backend][INFO] - + Checking contineous device isolation -[2023-08-17 16:57:47,160][pytorch][INFO] - + Disabling gradients -[2023-08-17 16:57:47,161][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-17 16:57:47,809][pytorch][INFO] - + Turning on eval mode -[2023-08-17 16:57:47,810][inference][INFO] - Running inference benchmark -[2023-08-17 16:57:47,936][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-17 16:57:47,937][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-17 16:57:47,995][inference][INFO] - + Forward pass peak memory: 466.980864 (MB) -[2023-08-17 16:57:47,996][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-17 16:57:47,997][inference][INFO] - + Warming up the forward pass -[2023-08-17 16:57:48,035][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-17 16:57:53,087][inference][INFO] - + Forward pass latency: 3.10e-03 (s) -[2023-08-17 16:57:53,089][inference][INFO] - + Forward pass throughput: 323.00 (samples/s) -[2023-08-17 16:57:53,089][inference][INFO] - Saving inference results -[2023-08-17 16:57:53,100][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-17_15:34:25_d4c0aa1443557981a0690c0593be7b0f6ffd53cf/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-17_15:34:25_d4c0aa1443557981a0690c0593be7b0f6ffd53cf/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index fc02352dc97d220d55fe04bc4edef4200110f22a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:34:25_d4c0aa1443557981a0690c0593be7b0f6ffd53cf/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_15:34:25_d4c0aa1443557981a0690c0593be7b0f6ffd53cf/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-17_15:34:25_d4c0aa1443557981a0690c0593be7b0f6ffd53cf/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 2bab1d625c1b6d3405814a73c1b34aedaec2484b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:34:25_d4c0aa1443557981a0690c0593be7b0f6ffd53cf/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-17_15:34:25_d4c0aa1443557981a0690c0593be7b0f6ffd53cf/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-17_15:34:25_d4c0aa1443557981a0690c0593be7b0f6ffd53cf/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-08-17_15:34:25_d4c0aa1443557981a0690c0593be7b0f6ffd53cf/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:34:25_d4c0aa1443557981a0690c0593be7b0f6ffd53cf/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-17_15:34:25_d4c0aa1443557981a0690c0593be7b0f6ffd53cf/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-17_15:34:25_d4c0aa1443557981a0690c0593be7b0f6ffd53cf/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 819b1876e26fdd758df23050ecd02bd8d430c90c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:34:25_d4c0aa1443557981a0690c0593be7b0f6ffd53cf/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_15:34:25_d4c0aa1443557981a0690c0593be7b0f6ffd53cf/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-17_15:34:25_d4c0aa1443557981a0690c0593be7b0f6ffd53cf/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 3069de7a800baeeadbf7c35cd06c8fd3b9d4b9c0..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:34:25_d4c0aa1443557981a0690c0593be7b0f6ffd53cf/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.08678399999997,0.00342,1170.0 diff --git a/raw_results/2023-08-17_15:34:25_d4c0aa1443557981a0690c0593be7b0f6ffd53cf/pytorch_bert_inference/1/main.log b/raw_results/2023-08-17_15:34:25_d4c0aa1443557981a0690c0593be7b0f6ffd53cf/pytorch_bert_inference/1/main.log deleted file mode 100644 index b92683bfe663318fa45afdb80d12730fce22a509..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-17_15:34:25_d4c0aa1443557981a0690c0593be7b0f6ffd53cf/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-17 16:57:53,477][benchmark][INFO] - Configuring inference benchmark -[2023-08-17 16:57:53,478][benchmark][INFO] - + Setting seed(42) -[2023-08-17 16:57:53,934][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-17 16:57:53,934][backend][INFO] - Configuring pytorch backend -[2023-08-17 16:57:53,934][backend][INFO] - + Checking initial device isolation -[2023-08-17 16:57:53,935][backend][INFO] - + Checking contineous device isolation -[2023-08-17 16:57:53,935][pytorch][INFO] - + Disabling gradients -[2023-08-17 16:57:53,935][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-17 16:57:54,054][pytorch][INFO] - + Turning on eval mode -[2023-08-17 16:57:54,054][inference][INFO] - Running inference benchmark -[2023-08-17 16:57:54,177][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-17 16:57:54,178][inference][INFO] - + Tracking forward pass peak memory -[2023-08-17 16:57:54,221][inference][INFO] - + Forward pass peak memory: 468.08678399999997 (MB) -[2023-08-17 16:57:54,222][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-17 16:57:54,223][inference][INFO] - + Warming up the forward pass -[2023-08-17 16:57:54,259][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-17 16:57:59,307][inference][INFO] - + Forward pass latency: 3.42e-03 (s) -[2023-08-17 16:57:59,308][inference][INFO] - + Forward pass throughput: 1170.00 (samples/s) -[2023-08-17 16:57:59,308][inference][INFO] - Saving inference results -[2023-08-17 16:57:59,317][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-17_15:34:25_d4c0aa1443557981a0690c0593be7b0f6ffd53cf/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-17_15:34:25_d4c0aa1443557981a0690c0593be7b0f6ffd53cf/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 39258ef78f16622f3cbf970c22d0b92229e7cc16..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:34:25_d4c0aa1443557981a0690c0593be7b0f6ffd53cf/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference 
-model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_15:34:25_d4c0aa1443557981a0690c0593be7b0f6ffd53cf/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-17_15:34:25_d4c0aa1443557981a0690c0593be7b0f6ffd53cf/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index d47a6f8ec682741e7274d13d7c155b01d6a34fba..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:34:25_d4c0aa1443557981a0690c0593be7b0f6ffd53cf/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-17_15:34:25_d4c0aa1443557981a0690c0593be7b0f6ffd53cf/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-17_15:34:25_d4c0aa1443557981a0690c0593be7b0f6ffd53cf/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-17_15:34:25_d4c0aa1443557981a0690c0593be7b0f6ffd53cf/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:34:25_d4c0aa1443557981a0690c0593be7b0f6ffd53cf/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-17_15:34:25_d4c0aa1443557981a0690c0593be7b0f6ffd53cf/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-17_15:34:25_d4c0aa1443557981a0690c0593be7b0f6ffd53cf/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index cb6bbe7e2787ba735853eaeafaa72ec86761d726..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:34:25_d4c0aa1443557981a0690c0593be7b0f6ffd53cf/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_15:34:25_d4c0aa1443557981a0690c0593be7b0f6ffd53cf/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-17_15:34:25_d4c0aa1443557981a0690c0593be7b0f6ffd53cf/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 21ef7df75757e05cc8c1cd668d5e1d9f581259a8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:34:25_d4c0aa1443557981a0690c0593be7b0f6ffd53cf/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.004288,0.00375,267.0,0.567,176.0 diff --git a/raw_results/2023-08-17_15:34:25_d4c0aa1443557981a0690c0593be7b0f6ffd53cf/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-17_15:34:25_d4c0aa1443557981a0690c0593be7b0f6ffd53cf/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 557980525a8ec42ed6c0df89a83ffa4dbc6f5d34..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:34:25_d4c0aa1443557981a0690c0593be7b0f6ffd53cf/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-17 16:58:04,013][benchmark][INFO] - Configuring inference benchmark -[2023-08-17 16:58:04,014][benchmark][INFO] - + Setting seed(42) -[2023-08-17 16:58:05,661][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-17 16:58:05,662][backend][INFO] - Configuring pytorch backend -[2023-08-17 16:58:05,662][backend][INFO] - + Checking initial device isolation -[2023-08-17 16:58:05,662][backend][INFO] - + Checking contineous device isolation -[2023-08-17 16:58:05,662][pytorch][INFO] - + Disabling gradients -[2023-08-17 16:58:05,662][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-17 16:58:06,379][pytorch][INFO] - + Turning on eval mode -[2023-08-17 16:58:06,380][inference][INFO] - Running inference benchmark -[2023-08-17 16:58:06,575][inference][INFO] - + Tracking forward pass peak memory -[2023-08-17 16:58:06,626][inference][INFO] - + Forward pass peak memory: 469.004288 (MB) -[2023-08-17 16:58:06,627][inference][INFO] - + Warming up the forward pass 
-[2023-08-17 16:58:06,664][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-17 16:58:11,710][inference][INFO] - + Forward pass latency: 3.75e-03 (s) -[2023-08-17 16:58:11,711][inference][INFO] - + Forward pass throughput: 267.00 (samples/s) -[2023-08-17 16:58:11,712][inference][INFO] - + Warming up the generation pass -[2023-08-17 16:58:12,272][inference][INFO] - + Tracking generation latency and throughput -[2023-08-17 16:58:17,378][inference][INFO] - + Generation pass latency: 5.67e-01 (s) -[2023-08-17 16:58:17,379][inference][INFO] - + Generation pass throughput: 176.00 (tokens/s) -[2023-08-17 16:58:17,379][inference][INFO] - Saving inference results -[2023-08-17 16:58:17,390][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-17_15:44:01_4e1dee0e8e06c1146d023c43812b88bfe2763329/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-17_15:44:01_4e1dee0e8e06c1146d023c43812b88bfe2763329/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index 8555d6744dd0e5881f14d8db6ba7f34fd0488b3b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:44:01_4e1dee0e8e06c1146d023c43812b88bfe2763329/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_15:44:01_4e1dee0e8e06c1146d023c43812b88bfe2763329/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-17_15:44:01_4e1dee0e8e06c1146d023c43812b88bfe2763329/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index e523a447a48ba34ffac8dfd8b23d88f56f85352c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:44:01_4e1dee0e8e06c1146d023c43812b88bfe2763329/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-17_15:44:01_4e1dee0e8e06c1146d023c43812b88bfe2763329/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-17_15:44:01_4e1dee0e8e06c1146d023c43812b88bfe2763329/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-17_15:44:01_4e1dee0e8e06c1146d023c43812b88bfe2763329/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:44:01_4e1dee0e8e06c1146d023c43812b88bfe2763329/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-17_15:44:01_4e1dee0e8e06c1146d023c43812b88bfe2763329/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-17_15:44:01_4e1dee0e8e06c1146d023c43812b88bfe2763329/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 8748d22fe3c2d5758d4d82aa74516bd5a24d74c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:44:01_4e1dee0e8e06c1146d023c43812b88bfe2763329/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_15:44:01_4e1dee0e8e06c1146d023c43812b88bfe2763329/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-17_15:44:01_4e1dee0e8e06c1146d023c43812b88bfe2763329/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 49ac1f45c9119bd6df795b4360e73bf0679bcb70..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:44:01_4e1dee0e8e06c1146d023c43812b88bfe2763329/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.706432,0.00377,265.0 diff --git a/raw_results/2023-08-17_15:44:01_4e1dee0e8e06c1146d023c43812b88bfe2763329/pytorch_bert_inference/0/main.log b/raw_results/2023-08-17_15:44:01_4e1dee0e8e06c1146d023c43812b88bfe2763329/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
5bc751e0b56067715480ef7f29899a43537766e9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:44:01_4e1dee0e8e06c1146d023c43812b88bfe2763329/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-17 16:59:20,153][benchmark][INFO] - Configuring inference benchmark -[2023-08-17 16:59:20,154][benchmark][INFO] - + Setting seed(42) -[2023-08-17 16:59:21,410][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-17 16:59:21,411][backend][INFO] - Configuring pytorch backend -[2023-08-17 16:59:21,411][backend][INFO] - + Checking initial device isolation -[2023-08-17 16:59:21,411][backend][INFO] - + Checking contineous device isolation -[2023-08-17 16:59:21,411][pytorch][INFO] - + Disabling gradients -[2023-08-17 16:59:21,412][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-17 16:59:22,033][pytorch][INFO] - + Turning on eval mode -[2023-08-17 16:59:22,033][inference][INFO] - Running inference benchmark -[2023-08-17 16:59:22,160][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-17 16:59:22,161][inference][INFO] - + Tracking forward pass peak memory -[2023-08-17 16:59:22,219][inference][INFO] - + Forward pass peak memory: 466.706432 (MB) -[2023-08-17 16:59:22,220][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-17 16:59:22,222][inference][INFO] - + Warming up the forward pass -[2023-08-17 16:59:22,264][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-17 16:59:27,310][inference][INFO] - + Forward pass latency: 3.77e-03 (s) -[2023-08-17 16:59:27,311][inference][INFO] - + Forward pass throughput: 265.00 (samples/s) -[2023-08-17 16:59:27,311][inference][INFO] - Saving inference results -[2023-08-17 16:59:27,322][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-17_15:44:01_4e1dee0e8e06c1146d023c43812b88bfe2763329/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-17_15:44:01_4e1dee0e8e06c1146d023c43812b88bfe2763329/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index fc02352dc97d220d55fe04bc4edef4200110f22a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:44:01_4e1dee0e8e06c1146d023c43812b88bfe2763329/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_15:44:01_4e1dee0e8e06c1146d023c43812b88bfe2763329/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-17_15:44:01_4e1dee0e8e06c1146d023c43812b88bfe2763329/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index cc12c73060e7db34b79c0c85b2a90eae63d65701..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:44:01_4e1dee0e8e06c1146d023c43812b88bfe2763329/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
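The sweeper section of this hydra.yaml is what produces the paired 0/ and 1/ job directories seen throughout these results. A short sketch of how Hydra's BasicSweeper (hydra._internal.core_plugins.basic_sweeper, as referenced above) expands the comma-separated override into per-job configs; job ids and override_dirname values match the ones recorded in these hunks:

```python
# Expand "benchmark.input_shapes.batch_size: 1,4" into the two sweep jobs.
sweep_key, sweep_values = "benchmark.input_shapes.batch_size", "1,4"
for num, value in enumerate(sweep_values.split(",")):
    override_dirname = f"{sweep_key}={value}"
    # num becomes ${hydra.job.num}, i.e. the sweep subdir (0/ or 1/)
    print(num, override_dirname)  # 0 -> batch_size=1, 1 -> batch_size=4
```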
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-17_15:44:01_4e1dee0e8e06c1146d023c43812b88bfe2763329/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-17_15:44:01_4e1dee0e8e06c1146d023c43812b88bfe2763329/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-17_15:44:01_4e1dee0e8e06c1146d023c43812b88bfe2763329/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:44:01_4e1dee0e8e06c1146d023c43812b88bfe2763329/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-17_15:44:01_4e1dee0e8e06c1146d023c43812b88bfe2763329/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-17_15:44:01_4e1dee0e8e06c1146d023c43812b88bfe2763329/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 819b1876e26fdd758df23050ecd02bd8d430c90c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:44:01_4e1dee0e8e06c1146d023c43812b88bfe2763329/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_15:44:01_4e1dee0e8e06c1146d023c43812b88bfe2763329/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-17_15:44:01_4e1dee0e8e06c1146d023c43812b88bfe2763329/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 86aed4ba6aa0939cc566cba76346ceadc8dc8dcd..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:44:01_4e1dee0e8e06c1146d023c43812b88bfe2763329/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.73452799999995,0.00428,935.0 diff --git a/raw_results/2023-08-17_15:44:01_4e1dee0e8e06c1146d023c43812b88bfe2763329/pytorch_bert_inference/1/main.log b/raw_results/2023-08-17_15:44:01_4e1dee0e8e06c1146d023c43812b88bfe2763329/pytorch_bert_inference/1/main.log deleted file mode 100644 index d6b1063ac504205a6aeb958e4ae93fbafcbb2dc6..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:44:01_4e1dee0e8e06c1146d023c43812b88bfe2763329/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-17 16:59:27,702][benchmark][INFO] - Configuring inference benchmark -[2023-08-17 16:59:27,703][benchmark][INFO] - + Setting seed(42) -[2023-08-17 16:59:28,157][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-17 16:59:28,157][backend][INFO] - Configuring pytorch backend -[2023-08-17 16:59:28,158][backend][INFO] - + Checking initial device isolation -[2023-08-17 16:59:28,158][backend][INFO] - + Checking contineous device isolation -[2023-08-17 16:59:28,158][pytorch][INFO] - + Disabling gradients -[2023-08-17 16:59:28,158][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-17 16:59:28,285][pytorch][INFO] - + Turning on eval mode -[2023-08-17 16:59:28,286][inference][INFO] - Running inference benchmark -[2023-08-17 16:59:28,421][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-17 16:59:28,422][inference][INFO] - + Tracking forward 
pass peak memory -[2023-08-17 16:59:28,465][inference][INFO] - + Forward pass peak memory: 467.73452799999995 (MB) -[2023-08-17 16:59:28,466][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-17 16:59:28,468][inference][INFO] - + Warming up the forward pass -[2023-08-17 16:59:28,512][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-17 16:59:33,551][inference][INFO] - + Forward pass latency: 4.28e-03 (s) -[2023-08-17 16:59:33,552][inference][INFO] - + Forward pass throughput: 935.00 (samples/s) -[2023-08-17 16:59:33,552][inference][INFO] - Saving inference results -[2023-08-17 16:59:33,561][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-17_15:44:01_4e1dee0e8e06c1146d023c43812b88bfe2763329/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-17_15:44:01_4e1dee0e8e06c1146d023c43812b88bfe2763329/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 39258ef78f16622f3cbf970c22d0b92229e7cc16..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:44:01_4e1dee0e8e06c1146d023c43812b88bfe2763329/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_15:44:01_4e1dee0e8e06c1146d023c43812b88bfe2763329/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-17_15:44:01_4e1dee0e8e06c1146d023c43812b88bfe2763329/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 6b1bc9eac28078c7ea2f5b8dd50203b55caffee7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:44:01_4e1dee0e8e06c1146d023c43812b88bfe2763329/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - 
launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-17_15:44:01_4e1dee0e8e06c1146d023c43812b88bfe2763329/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-17_15:44:01_4e1dee0e8e06c1146d023c43812b88bfe2763329/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-17_15:44:01_4e1dee0e8e06c1146d023c43812b88bfe2763329/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:44:01_4e1dee0e8e06c1146d023c43812b88bfe2763329/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-17_15:44:01_4e1dee0e8e06c1146d023c43812b88bfe2763329/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-17_15:44:01_4e1dee0e8e06c1146d023c43812b88bfe2763329/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index cb6bbe7e2787ba735853eaeafaa72ec86761d726..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:44:01_4e1dee0e8e06c1146d023c43812b88bfe2763329/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_15:44:01_4e1dee0e8e06c1146d023c43812b88bfe2763329/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-17_15:44:01_4e1dee0e8e06c1146d023c43812b88bfe2763329/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 67774a4a50b04ef2b021438294b6181fbac07c46..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_15:44:01_4e1dee0e8e06c1146d023c43812b88bfe2763329/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.04115199999995,0.00344,291.0,0.479,209.0 diff --git a/raw_results/2023-08-17_15:44:01_4e1dee0e8e06c1146d023c43812b88bfe2763329/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-17_15:44:01_4e1dee0e8e06c1146d023c43812b88bfe2763329/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 0748b5c1aeaa6ce8669c429e358ef3e1ebd074a9..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-17_15:44:01_4e1dee0e8e06c1146d023c43812b88bfe2763329/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-17 16:59:38,295][benchmark][INFO] - Configuring inference benchmark -[2023-08-17 16:59:38,296][benchmark][INFO] - + Setting seed(42) -[2023-08-17 16:59:39,696][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-17 16:59:39,696][backend][INFO] - Configuring pytorch backend -[2023-08-17 16:59:39,697][backend][INFO] - + Checking initial device isolation -[2023-08-17 16:59:39,697][backend][INFO] - + Checking contineous device isolation -[2023-08-17 16:59:39,697][pytorch][INFO] - + Disabling gradients -[2023-08-17 16:59:39,697][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-17 16:59:40,344][pytorch][INFO] - + Turning on eval mode -[2023-08-17 16:59:40,345][inference][INFO] - Running inference benchmark -[2023-08-17 16:59:40,539][inference][INFO] - + Tracking forward pass peak memory -[2023-08-17 16:59:40,584][inference][INFO] - + Forward pass peak memory: 469.04115199999995 (MB) -[2023-08-17 16:59:40,585][inference][INFO] - + Warming up the forward pass -[2023-08-17 16:59:40,617][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-17 16:59:45,665][inference][INFO] - + Forward pass latency: 3.44e-03 (s) -[2023-08-17 16:59:45,667][inference][INFO] - + Forward pass throughput: 291.00 (samples/s) -[2023-08-17 16:59:45,667][inference][INFO] - + Warming up the generation pass -[2023-08-17 16:59:46,155][inference][INFO] - + Tracking generation latency and throughput -[2023-08-17 16:59:51,431][inference][INFO] - + Generation pass latency: 4.79e-01 (s) -[2023-08-17 16:59:51,432][inference][INFO] - + Generation pass throughput: 209.00 (tokens/s) -[2023-08-17 16:59:51,432][inference][INFO] - Saving inference results -[2023-08-17 16:59:51,444][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-17_16:23:34_c4c0ceff096473cb4e47ef2f067640bcdf0b32e0/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-17_16:23:34_c4c0ceff096473cb4e47ef2f067640bcdf0b32e0/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index 8555d6744dd0e5881f14d8db6ba7f34fd0488b3b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_16:23:34_c4c0ceff096473cb4e47ef2f067640bcdf0b32e0/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_16:23:34_c4c0ceff096473cb4e47ef2f067640bcdf0b32e0/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-17_16:23:34_c4c0ceff096473cb4e47ef2f067640bcdf0b32e0/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 5537afd519c851267b9faff09e875bdfd6f28a60..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_16:23:34_c4c0ceff096473cb4e47ef2f067640bcdf0b32e0/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
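Note the difference between config.yaml above, which stores `disable_grad: ${is_inference:${benchmark.name}}`, and the hydra_config.yaml that follows, which stores the resolved `disable_grad: true`: a custom OmegaConf resolver evaluates the interpolation at compose time. A minimal sketch of that mechanism, assuming optimum_benchmark registers the resolver roughly like this (its actual implementation is not part of this diff):

```python
from omegaconf import OmegaConf

# Hypothetical registration; only the resolver name comes from the config.
OmegaConf.register_new_resolver("is_inference", lambda benchmark_name: benchmark_name == "inference")

cfg = OmegaConf.create({
    "benchmark": {"name": "inference"},
    "backend": {"disable_grad": "${is_inference:${benchmark.name}}"},
})
print(cfg.backend.disable_grad)  # True -> serialized as `disable_grad: true`
```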
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-17_16:23:34_c4c0ceff096473cb4e47ef2f067640bcdf0b32e0/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-17_16:23:34_c4c0ceff096473cb4e47ef2f067640bcdf0b32e0/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-17_16:23:34_c4c0ceff096473cb4e47ef2f067640bcdf0b32e0/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_16:23:34_c4c0ceff096473cb4e47ef2f067640bcdf0b32e0/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-17_16:23:34_c4c0ceff096473cb4e47ef2f067640bcdf0b32e0/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-17_16:23:34_c4c0ceff096473cb4e47ef2f067640bcdf0b32e0/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 8748d22fe3c2d5758d4d82aa74516bd5a24d74c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_16:23:34_c4c0ceff096473cb4e47ef2f067640bcdf0b32e0/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_16:23:34_c4c0ceff096473cb4e47ef2f067640bcdf0b32e0/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-17_16:23:34_c4c0ceff096473cb4e47ef2f067640bcdf0b32e0/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index bca508ad5e5feb01a661b06596b95dacc00495d9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_16:23:34_c4c0ceff096473cb4e47ef2f067640bcdf0b32e0/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.849792,0.0032,312.0 diff --git a/raw_results/2023-08-17_16:23:34_c4c0ceff096473cb4e47ef2f067640bcdf0b32e0/pytorch_bert_inference/0/main.log b/raw_results/2023-08-17_16:23:34_c4c0ceff096473cb4e47ef2f067640bcdf0b32e0/pytorch_bert_inference/0/main.log deleted file mode 100644 index 35891e76d4dd57c4dfbe80fcace6249b7ce9c6da..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_16:23:34_c4c0ceff096473cb4e47ef2f067640bcdf0b32e0/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-17 17:00:54,449][benchmark][INFO] - Configuring inference benchmark -[2023-08-17 17:00:54,449][benchmark][INFO] - + Setting seed(42) -[2023-08-17 17:00:55,675][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-17 17:00:55,676][backend][INFO] - Configuring pytorch backend -[2023-08-17 17:00:55,676][backend][INFO] - + Checking initial device isolation -[2023-08-17 17:00:55,676][backend][INFO] - + Checking contineous device isolation -[2023-08-17 17:00:55,676][pytorch][INFO] - + Disabling gradients -[2023-08-17 17:00:55,677][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-17 17:00:56,293][pytorch][INFO] - + Turning on eval mode -[2023-08-17 17:00:56,294][inference][INFO] - Running inference benchmark -[2023-08-17 17:00:56,420][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-17 17:00:56,421][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-17 17:00:56,477][inference][INFO] - + Forward pass peak memory: 466.849792 (MB) -[2023-08-17 17:00:56,478][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-17 17:00:56,480][inference][INFO] - + Warming up the forward pass -[2023-08-17 17:00:56,513][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-17 17:01:01,563][inference][INFO] - + Forward pass latency: 3.20e-03 (s) -[2023-08-17 17:01:01,564][inference][INFO] - + Forward pass throughput: 312.00 (samples/s) -[2023-08-17 17:01:01,564][inference][INFO] - Saving inference results -[2023-08-17 17:01:01,576][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-17_16:23:34_c4c0ceff096473cb4e47ef2f067640bcdf0b32e0/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-17_16:23:34_c4c0ceff096473cb4e47ef2f067640bcdf0b32e0/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index fc02352dc97d220d55fe04bc4edef4200110f22a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_16:23:34_c4c0ceff096473cb4e47ef2f067640bcdf0b32e0/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_16:23:34_c4c0ceff096473cb4e47ef2f067640bcdf0b32e0/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-17_16:23:34_c4c0ceff096473cb4e47ef2f067640bcdf0b32e0/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index df0236479de2b251f9582d62572c7518a87d9dce..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_16:23:34_c4c0ceff096473cb4e47ef2f067640bcdf0b32e0/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-17_16:23:34_c4c0ceff096473cb4e47ef2f067640bcdf0b32e0/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-17_16:23:34_c4c0ceff096473cb4e47ef2f067640bcdf0b32e0/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-08-17_16:23:34_c4c0ceff096473cb4e47ef2f067640bcdf0b32e0/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_16:23:34_c4c0ceff096473cb4e47ef2f067640bcdf0b32e0/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-17_16:23:34_c4c0ceff096473cb4e47ef2f067640bcdf0b32e0/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-17_16:23:34_c4c0ceff096473cb4e47ef2f067640bcdf0b32e0/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 819b1876e26fdd758df23050ecd02bd8d430c90c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_16:23:34_c4c0ceff096473cb4e47ef2f067640bcdf0b32e0/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_16:23:34_c4c0ceff096473cb4e47ef2f067640bcdf0b32e0/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-17_16:23:34_c4c0ceff096473cb4e47ef2f067640bcdf0b32e0/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index d0137a2672172e589d2db788210dd4d73998d0a1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_16:23:34_c4c0ceff096473cb4e47ef2f067640bcdf0b32e0/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.94752,0.00361,1110.0 diff --git a/raw_results/2023-08-17_16:23:34_c4c0ceff096473cb4e47ef2f067640bcdf0b32e0/pytorch_bert_inference/1/main.log b/raw_results/2023-08-17_16:23:34_c4c0ceff096473cb4e47ef2f067640bcdf0b32e0/pytorch_bert_inference/1/main.log deleted file mode 100644 index fdd36bf30a3ae78a740f8306a21f843c59b542a2..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-17_16:23:34_c4c0ceff096473cb4e47ef2f067640bcdf0b32e0/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-17 17:01:01,946][benchmark][INFO] - Configuring inference benchmark -[2023-08-17 17:01:01,947][benchmark][INFO] - + Setting seed(42) -[2023-08-17 17:01:02,414][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-17 17:01:02,414][backend][INFO] - Configuring pytorch backend -[2023-08-17 17:01:02,414][backend][INFO] - + Checking initial device isolation -[2023-08-17 17:01:02,414][backend][INFO] - + Checking contineous device isolation -[2023-08-17 17:01:02,415][pytorch][INFO] - + Disabling gradients -[2023-08-17 17:01:02,415][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-17 17:01:02,537][pytorch][INFO] - + Turning on eval mode -[2023-08-17 17:01:02,538][inference][INFO] - Running inference benchmark -[2023-08-17 17:01:02,659][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-17 17:01:02,660][inference][INFO] - + Tracking forward pass peak memory -[2023-08-17 17:01:02,703][inference][INFO] - + Forward pass peak memory: 467.94752 (MB) -[2023-08-17 17:01:02,704][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-17 17:01:02,706][inference][INFO] - + Warming up the forward pass -[2023-08-17 17:01:02,758][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-17 17:01:07,800][inference][INFO] - + Forward pass latency: 3.61e-03 (s) -[2023-08-17 17:01:07,802][inference][INFO] - + Forward pass throughput: 1110.00 (samples/s) -[2023-08-17 17:01:07,802][inference][INFO] - Saving inference results -[2023-08-17 17:01:07,810][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-17_16:23:34_c4c0ceff096473cb4e47ef2f067640bcdf0b32e0/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-17_16:23:34_c4c0ceff096473cb4e47ef2f067640bcdf0b32e0/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 39258ef78f16622f3cbf970c22d0b92229e7cc16..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_16:23:34_c4c0ceff096473cb4e47ef2f067640bcdf0b32e0/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: 
hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_16:23:34_c4c0ceff096473cb4e47ef2f067640bcdf0b32e0/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-17_16:23:34_c4c0ceff096473cb4e47ef2f067640bcdf0b32e0/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 262117b61873268cf139684c0ab551022f9a6fc3..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_16:23:34_c4c0ceff096473cb4e47ef2f067640bcdf0b32e0/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
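
The run/sweep directory patterns at the top of this hydra.yaml are what generate the per-commit folder names used throughout raw_results/ in this diff: Hydra resolves ${oc.env:COMMIT_DATE_GMT} and ${oc.env:COMMIT_SHA} from the environment, appends ${experiment_name}, and numbers each job via ${hydra.job.num}. A minimal sketch of that resolution in Python, using the values visible in this file's own paths (any later copy of the sweeps/ output into raw_results/ is an assumption, not something the config shows):

    import os

    # Environment Hydra reads via ${oc.env:...} at launch time
    os.environ["COMMIT_DATE_GMT"] = "2023-08-17_16:23:34"
    os.environ["COMMIT_SHA"] = "c4c0ceff096473cb4e47ef2f067640bcdf0b32e0"

    experiment_name = "pytorch_gpt2_inference"  # from the config above
    job_num = 0                                 # ${hydra.job.num}

    sweep_dir = (
        f"sweeps/{os.environ['COMMIT_DATE_GMT']}_{os.environ['COMMIT_SHA']}"
        f"/{experiment_name}/{job_num}"
    )
    print(sweep_dir)
    # -> sweeps/<date>_<sha>/pytorch_gpt2_inference/0, matching the
    #    output_dir recorded further down in this file
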
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-17_16:23:34_c4c0ceff096473cb4e47ef2f067640bcdf0b32e0/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-17_16:23:34_c4c0ceff096473cb4e47ef2f067640bcdf0b32e0/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-17_16:23:34_c4c0ceff096473cb4e47ef2f067640bcdf0b32e0/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_16:23:34_c4c0ceff096473cb4e47ef2f067640bcdf0b32e0/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-17_16:23:34_c4c0ceff096473cb4e47ef2f067640bcdf0b32e0/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-17_16:23:34_c4c0ceff096473cb4e47ef2f067640bcdf0b32e0/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index cb6bbe7e2787ba735853eaeafaa72ec86761d726..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_16:23:34_c4c0ceff096473cb4e47ef2f067640bcdf0b32e0/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_16:23:34_c4c0ceff096473cb4e47ef2f067640bcdf0b32e0/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-17_16:23:34_c4c0ceff096473cb4e47ef2f067640bcdf0b32e0/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 8634483d1acf4c16db3503c68b036b8f5678d121..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_16:23:34_c4c0ceff096473cb4e47ef2f067640bcdf0b32e0/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.250048,0.00383,261.0,0.513,195.0 diff --git a/raw_results/2023-08-17_16:23:34_c4c0ceff096473cb4e47ef2f067640bcdf0b32e0/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-17_16:23:34_c4c0ceff096473cb4e47ef2f067640bcdf0b32e0/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 06f09807318d40b16a771108e2504d559a2b7f6a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_16:23:34_c4c0ceff096473cb4e47ef2f067640bcdf0b32e0/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-17 17:01:13,633][benchmark][INFO] - Configuring inference benchmark -[2023-08-17 17:01:13,633][benchmark][INFO] - + Setting seed(42) -[2023-08-17 17:01:15,068][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-17 17:01:15,069][backend][INFO] - Configuring pytorch backend -[2023-08-17 17:01:15,069][backend][INFO] - + Checking initial device isolation -[2023-08-17 17:01:15,069][backend][INFO] - + Checking contineous device isolation -[2023-08-17 17:01:15,069][pytorch][INFO] - + Disabling gradients -[2023-08-17 17:01:15,070][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-17 17:01:15,707][pytorch][INFO] - + Turning on eval mode -[2023-08-17 17:01:15,708][inference][INFO] - Running inference benchmark -[2023-08-17 17:01:15,904][inference][INFO] - + Tracking forward pass peak memory -[2023-08-17 17:01:15,952][inference][INFO] - + Forward pass peak memory: 469.250048 (MB) -[2023-08-17 17:01:15,953][inference][INFO] - + Warming up the forward pass 
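
The inference_results.csv row above pairs each latency with a derived throughput. The numbers are consistent with throughput being batch_size / forward latency and batch_size * new_tokens / generation latency (batch_size: 1 and new_tokens: 100 in this run's config), reported to roughly three significant figures. A quick sanity check, as a hedged reconstruction rather than the benchmark's actual code:

    batch_size, new_tokens = 1, 100

    forward_latency = 0.00383   # s, from the CSV row above
    generate_latency = 0.513    # s

    # 1 / 0.00383 = 261.1 -> reported 261.0 samples/s
    print(round(batch_size / forward_latency))
    # 1 * 100 / 0.513 = 194.9 -> reported 195.0 tokens/s
    print(round(batch_size * new_tokens / generate_latency))
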
-[2023-08-17 17:01:15,987][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-17 17:01:21,036][inference][INFO] - + Forward pass latency: 3.83e-03 (s) -[2023-08-17 17:01:21,038][inference][INFO] - + Forward pass throughput: 261.00 (samples/s) -[2023-08-17 17:01:21,038][inference][INFO] - + Warming up the generation pass -[2023-08-17 17:01:21,541][inference][INFO] - + Tracking generation latency and throughput -[2023-08-17 17:01:26,669][inference][INFO] - + Generation pass latency: 5.13e-01 (s) -[2023-08-17 17:01:26,670][inference][INFO] - + Generation pass throughput: 195.00 (tokens/s) -[2023-08-17 17:01:26,670][inference][INFO] - Saving inference results -[2023-08-17 17:01:26,681][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-17_16:34:47_b8f69d0d10e74c3718e5c79891fdc2a5ac8887d0/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-17_16:34:47_b8f69d0d10e74c3718e5c79891fdc2a5ac8887d0/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index 8555d6744dd0e5881f14d8db6ba7f34fd0488b3b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_16:34:47_b8f69d0d10e74c3718e5c79891fdc2a5ac8887d0/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_16:34:47_b8f69d0d10e74c3718e5c79891fdc2a5ac8887d0/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-17_16:34:47_b8f69d0d10e74c3718e5c79891fdc2a5ac8887d0/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 725017e0508a925d3f911eae5681f18f6cba3891..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_16:34:47_b8f69d0d10e74c3718e5c79891fdc2a5ac8887d0/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-17_16:34:47_b8f69d0d10e74c3718e5c79891fdc2a5ac8887d0/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-17_16:34:47_b8f69d0d10e74c3718e5c79891fdc2a5ac8887d0/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-17_16:34:47_b8f69d0d10e74c3718e5c79891fdc2a5ac8887d0/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_16:34:47_b8f69d0d10e74c3718e5c79891fdc2a5ac8887d0/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-17_16:34:47_b8f69d0d10e74c3718e5c79891fdc2a5ac8887d0/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-17_16:34:47_b8f69d0d10e74c3718e5c79891fdc2a5ac8887d0/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 8748d22fe3c2d5758d4d82aa74516bd5a24d74c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_16:34:47_b8f69d0d10e74c3718e5c79891fdc2a5ac8887d0/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_16:34:47_b8f69d0d10e74c3718e5c79891fdc2a5ac8887d0/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-17_16:34:47_b8f69d0d10e74c3718e5c79891fdc2a5ac8887d0/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 0645dd4230c9f71d5204dafc7eafb5a68b38fd22..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_16:34:47_b8f69d0d10e74c3718e5c79891fdc2a5ac8887d0/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.61983999999995,0.00367,272.0 diff --git a/raw_results/2023-08-17_16:34:47_b8f69d0d10e74c3718e5c79891fdc2a5ac8887d0/pytorch_bert_inference/0/main.log b/raw_results/2023-08-17_16:34:47_b8f69d0d10e74c3718e5c79891fdc2a5ac8887d0/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
238b8bb2d1c04d8f479c3a131cda2bffa8f2a8d1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_16:34:47_b8f69d0d10e74c3718e5c79891fdc2a5ac8887d0/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-17 17:02:29,676][benchmark][INFO] - Configuring inference benchmark -[2023-08-17 17:02:29,677][benchmark][INFO] - + Setting seed(42) -[2023-08-17 17:02:31,427][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-17 17:02:31,427][backend][INFO] - Configuring pytorch backend -[2023-08-17 17:02:31,427][backend][INFO] - + Checking initial device isolation -[2023-08-17 17:02:31,427][backend][INFO] - + Checking contineous device isolation -[2023-08-17 17:02:31,428][pytorch][INFO] - + Disabling gradients -[2023-08-17 17:02:31,428][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-17 17:02:32,226][pytorch][INFO] - + Turning on eval mode -[2023-08-17 17:02:32,226][inference][INFO] - Running inference benchmark -[2023-08-17 17:02:32,353][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-17 17:02:32,354][inference][INFO] - + Tracking forward pass peak memory -[2023-08-17 17:02:32,412][inference][INFO] - + Forward pass peak memory: 467.61983999999995 (MB) -[2023-08-17 17:02:32,414][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-17 17:02:32,415][inference][INFO] - + Warming up the forward pass -[2023-08-17 17:02:32,456][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-17 17:02:37,502][inference][INFO] - + Forward pass latency: 3.67e-03 (s) -[2023-08-17 17:02:37,504][inference][INFO] - + Forward pass throughput: 272.00 (samples/s) -[2023-08-17 17:02:37,504][inference][INFO] - Saving inference results -[2023-08-17 17:02:37,515][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-17_16:34:47_b8f69d0d10e74c3718e5c79891fdc2a5ac8887d0/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-17_16:34:47_b8f69d0d10e74c3718e5c79891fdc2a5ac8887d0/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index fc02352dc97d220d55fe04bc4edef4200110f22a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_16:34:47_b8f69d0d10e74c3718e5c79891fdc2a5ac8887d0/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_16:34:47_b8f69d0d10e74c3718e5c79891fdc2a5ac8887d0/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-17_16:34:47_b8f69d0d10e74c3718e5c79891fdc2a5ac8887d0/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 6ca023817bad4c177f29c0f561d3545974e09fb6..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_16:34:47_b8f69d0d10e74c3718e5c79891fdc2a5ac8887d0/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
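
In the config.yaml above, disable_grad and eval_mode are left as the interpolation ${is_inference:${benchmark.name}}, while the resolved hydra_config.yaml for the same job (further below) records both as true. A hedged sketch of how such a custom resolver can behave under OmegaConf — the actual registration inside optimum_benchmark may differ:

    from omegaconf import OmegaConf

    # Hypothetical resolver: true only for the inference benchmark
    OmegaConf.register_new_resolver(
        "is_inference", lambda name: name == "inference"
    )

    cfg = OmegaConf.create({
        "benchmark": {"name": "inference"},
        "disable_grad": "${is_inference:${benchmark.name}}",
    })
    print(cfg.disable_grad)  # True, matching the resolved hydra_config.yaml
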
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-17_16:34:47_b8f69d0d10e74c3718e5c79891fdc2a5ac8887d0/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-17_16:34:47_b8f69d0d10e74c3718e5c79891fdc2a5ac8887d0/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-17_16:34:47_b8f69d0d10e74c3718e5c79891fdc2a5ac8887d0/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_16:34:47_b8f69d0d10e74c3718e5c79891fdc2a5ac8887d0/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-17_16:34:47_b8f69d0d10e74c3718e5c79891fdc2a5ac8887d0/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-17_16:34:47_b8f69d0d10e74c3718e5c79891fdc2a5ac8887d0/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 819b1876e26fdd758df23050ecd02bd8d430c90c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_16:34:47_b8f69d0d10e74c3718e5c79891fdc2a5ac8887d0/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_16:34:47_b8f69d0d10e74c3718e5c79891fdc2a5ac8887d0/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-17_16:34:47_b8f69d0d10e74c3718e5c79891fdc2a5ac8887d0/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 39bf28214e08b2e3666963401d28968909af8f6b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_16:34:47_b8f69d0d10e74c3718e5c79891fdc2a5ac8887d0/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.6848,0.00413,969.0 diff --git a/raw_results/2023-08-17_16:34:47_b8f69d0d10e74c3718e5c79891fdc2a5ac8887d0/pytorch_bert_inference/1/main.log b/raw_results/2023-08-17_16:34:47_b8f69d0d10e74c3718e5c79891fdc2a5ac8887d0/pytorch_bert_inference/1/main.log deleted file mode 100644 index f50c87e1e0b5c72cbf5e7b323d69c74de791b53b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_16:34:47_b8f69d0d10e74c3718e5c79891fdc2a5ac8887d0/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-17 17:02:37,878][benchmark][INFO] - Configuring inference benchmark -[2023-08-17 17:02:37,879][benchmark][INFO] - + Setting seed(42) -[2023-08-17 17:02:38,327][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-17 17:02:38,327][backend][INFO] - Configuring pytorch backend -[2023-08-17 17:02:38,328][backend][INFO] - + Checking initial device isolation -[2023-08-17 17:02:38,328][backend][INFO] - + Checking contineous device isolation -[2023-08-17 17:02:38,328][pytorch][INFO] - + Disabling gradients -[2023-08-17 17:02:38,328][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-17 17:02:38,445][pytorch][INFO] - + Turning on eval mode -[2023-08-17 17:02:38,445][inference][INFO] - Running inference benchmark -[2023-08-17 17:02:38,577][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-17 17:02:38,579][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-17 17:02:38,622][inference][INFO] - + Forward pass peak memory: 468.6848 (MB) -[2023-08-17 17:02:38,623][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-17 17:02:38,624][inference][INFO] - + Warming up the forward pass -[2023-08-17 17:02:38,667][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-17 17:02:43,706][inference][INFO] - + Forward pass latency: 4.13e-03 (s) -[2023-08-17 17:02:43,707][inference][INFO] - + Forward pass throughput: 969.00 (samples/s) -[2023-08-17 17:02:43,707][inference][INFO] - Saving inference results -[2023-08-17 17:02:43,715][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-17_16:34:47_b8f69d0d10e74c3718e5c79891fdc2a5ac8887d0/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-17_16:34:47_b8f69d0d10e74c3718e5c79891fdc2a5ac8887d0/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 39258ef78f16622f3cbf970c22d0b92229e7cc16..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_16:34:47_b8f69d0d10e74c3718e5c79891fdc2a5ac8887d0/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_16:34:47_b8f69d0d10e74c3718e5c79891fdc2a5ac8887d0/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-17_16:34:47_b8f69d0d10e74c3718e5c79891fdc2a5ac8887d0/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index e3b534458817ce160efdef241114ddb201350104..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_16:34:47_b8f69d0d10e74c3718e5c79891fdc2a5ac8887d0/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: 
hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-17_16:34:47_b8f69d0d10e74c3718e5c79891fdc2a5ac8887d0/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-17_16:34:47_b8f69d0d10e74c3718e5c79891fdc2a5ac8887d0/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-17_16:34:47_b8f69d0d10e74c3718e5c79891fdc2a5ac8887d0/pytorch_gpt2_inference/0/.config/overrides.yaml 
deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_16:34:47_b8f69d0d10e74c3718e5c79891fdc2a5ac8887d0/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-17_16:34:47_b8f69d0d10e74c3718e5c79891fdc2a5ac8887d0/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-17_16:34:47_b8f69d0d10e74c3718e5c79891fdc2a5ac8887d0/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index cb6bbe7e2787ba735853eaeafaa72ec86761d726..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_16:34:47_b8f69d0d10e74c3718e5c79891fdc2a5ac8887d0/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_16:34:47_b8f69d0d10e74c3718e5c79891fdc2a5ac8887d0/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-17_16:34:47_b8f69d0d10e74c3718e5c79891fdc2a5ac8887d0/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 5e211586f2c5037ae767e59c1322ff1139ef9488..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_16:34:47_b8f69d0d10e74c3718e5c79891fdc2a5ac8887d0/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.1968,0.00315,317.0,0.483,207.0 diff --git a/raw_results/2023-08-17_16:34:47_b8f69d0d10e74c3718e5c79891fdc2a5ac8887d0/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-17_16:34:47_b8f69d0d10e74c3718e5c79891fdc2a5ac8887d0/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index cf88080424d13667cbec9b2682d59351618d1df3..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_16:34:47_b8f69d0d10e74c3718e5c79891fdc2a5ac8887d0/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-17 17:02:48,447][benchmark][INFO] - Configuring 
inference benchmark -[2023-08-17 17:02:48,449][benchmark][INFO] - + Setting seed(42) -[2023-08-17 17:02:50,185][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-17 17:02:50,186][backend][INFO] - Configuring pytorch backend -[2023-08-17 17:02:50,186][backend][INFO] - + Checking initial device isolation -[2023-08-17 17:02:50,186][backend][INFO] - + Checking contineous device isolation -[2023-08-17 17:02:50,186][pytorch][INFO] - + Disabling gradients -[2023-08-17 17:02:50,187][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-17 17:02:50,860][pytorch][INFO] - + Turning on eval mode -[2023-08-17 17:02:50,861][inference][INFO] - Running inference benchmark -[2023-08-17 17:02:51,065][inference][INFO] - + Tracking forward pass peak memory -[2023-08-17 17:02:51,112][inference][INFO] - + Forward pass peak memory: 469.1968 (MB) -[2023-08-17 17:02:51,114][inference][INFO] - + Warming up the forward pass -[2023-08-17 17:02:51,148][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-17 17:02:56,196][inference][INFO] - + Forward pass latency: 3.15e-03 (s) -[2023-08-17 17:02:56,197][inference][INFO] - + Forward pass throughput: 317.00 (samples/s) -[2023-08-17 17:02:56,198][inference][INFO] - + Warming up the generation pass -[2023-08-17 17:02:56,691][inference][INFO] - + Tracking generation latency and throughput -[2023-08-17 17:03:02,008][inference][INFO] - + Generation pass latency: 4.83e-01 (s) -[2023-08-17 17:03:02,009][inference][INFO] - + Generation pass throughput: 207.00 (tokens/s) -[2023-08-17 17:03:02,009][inference][INFO] - Saving inference results -[2023-08-17 17:03:02,021][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-17_16:56:34_427adc898ab49c321d58ff4011fa54133adf62c2/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-17_16:56:34_427adc898ab49c321d58ff4011fa54133adf62c2/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index 8555d6744dd0e5881f14d8db6ba7f34fd0488b3b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_16:56:34_427adc898ab49c321d58ff4011fa54133adf62c2/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null 
- force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_16:56:34_427adc898ab49c321d58ff4011fa54133adf62c2/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-17_16:56:34_427adc898ab49c321d58ff4011fa54133adf62c2/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index e1d52131ba628d9cacc3e0f4288fcdc93143567c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_16:56:34_427adc898ab49c321d58ff4011fa54133adf62c2/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
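
The sweeper block above (params: benchmark.input_shapes.batch_size: 1,4) is why every pytorch_bert_inference run in this diff has two numbered subdirectories: Hydra's BasicSweeper launches one job per value, ${hydra.job.num} (0 and 1) becomes the subdirectory name, and the chosen override is what each job records in its .config/overrides.yaml. A small illustration of that expansion — a sketch of the observable behavior, not Hydra's implementation:

    sweep_key = "benchmark.input_shapes.batch_size"
    values = "1,4".split(",")

    for job_num, value in enumerate(values):
        overrides = [f"{sweep_key}={value}"]
        print(job_num, overrides)
    # 0 ['benchmark.input_shapes.batch_size=1']
    # 1 ['benchmark.input_shapes.batch_size=4']
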
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-17_16:56:34_427adc898ab49c321d58ff4011fa54133adf62c2/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-17_16:56:34_427adc898ab49c321d58ff4011fa54133adf62c2/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-17_16:56:34_427adc898ab49c321d58ff4011fa54133adf62c2/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_16:56:34_427adc898ab49c321d58ff4011fa54133adf62c2/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-17_16:56:34_427adc898ab49c321d58ff4011fa54133adf62c2/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-17_16:56:34_427adc898ab49c321d58ff4011fa54133adf62c2/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 8748d22fe3c2d5758d4d82aa74516bd5a24d74c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_16:56:34_427adc898ab49c321d58ff4011fa54133adf62c2/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_16:56:34_427adc898ab49c321d58ff4011fa54133adf62c2/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-17_16:56:34_427adc898ab49c321d58ff4011fa54133adf62c2/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 43538841efc2fe3942d90333418e192a6b043d04..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_16:56:34_427adc898ab49c321d58ff4011fa54133adf62c2/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.98496,0.00358,279.0 diff --git a/raw_results/2023-08-17_16:56:34_427adc898ab49c321d58ff4011fa54133adf62c2/pytorch_bert_inference/0/main.log b/raw_results/2023-08-17_16:56:34_427adc898ab49c321d58ff4011fa54133adf62c2/pytorch_bert_inference/0/main.log deleted file mode 100644 index db0d39b1e561961f0ffdcae6352b3294b367df40..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_16:56:34_427adc898ab49c321d58ff4011fa54133adf62c2/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-17 18:49:53,465][benchmark][INFO] - Configuring inference benchmark -[2023-08-17 18:49:53,466][benchmark][INFO] - + Setting seed(42) -[2023-08-17 18:49:54,888][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-17 18:49:54,889][backend][INFO] - Configuring pytorch backend -[2023-08-17 18:49:54,889][backend][INFO] - + Checking initial device isolation -[2023-08-17 18:49:54,889][backend][INFO] - + Checking contineous device isolation -[2023-08-17 18:49:54,889][pytorch][INFO] - + Disabling gradients -[2023-08-17 18:49:54,890][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-17 18:49:55,577][pytorch][INFO] - + Turning on eval mode -[2023-08-17 18:49:55,577][inference][INFO] - Running inference benchmark -[2023-08-17 18:49:55,702][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-17 18:49:55,704][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-17 18:49:55,763][inference][INFO] - + Forward pass peak memory: 466.98496 (MB) -[2023-08-17 18:49:55,765][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-17 18:49:55,766][inference][INFO] - + Warming up the forward pass -[2023-08-17 18:49:55,803][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-17 18:50:00,851][inference][INFO] - + Forward pass latency: 3.58e-03 (s) -[2023-08-17 18:50:00,852][inference][INFO] - + Forward pass throughput: 279.00 (samples/s) -[2023-08-17 18:50:00,852][inference][INFO] - Saving inference results -[2023-08-17 18:50:00,863][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-17_16:56:34_427adc898ab49c321d58ff4011fa54133adf62c2/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-17_16:56:34_427adc898ab49c321d58ff4011fa54133adf62c2/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index fc02352dc97d220d55fe04bc4edef4200110f22a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_16:56:34_427adc898ab49c321d58ff4011fa54133adf62c2/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_16:56:34_427adc898ab49c321d58ff4011fa54133adf62c2/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-17_16:56:34_427adc898ab49c321d58ff4011fa54133adf62c2/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 205cf3131707dcc2d169446f05a73d9b4c64958d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_16:56:34_427adc898ab49c321d58ff4011fa54133adf62c2/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-17_16:56:34_427adc898ab49c321d58ff4011fa54133adf62c2/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-17_16:56:34_427adc898ab49c321d58ff4011fa54133adf62c2/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-08-17_16:56:34_427adc898ab49c321d58ff4011fa54133adf62c2/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_16:56:34_427adc898ab49c321d58ff4011fa54133adf62c2/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-17_16:56:34_427adc898ab49c321d58ff4011fa54133adf62c2/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-17_16:56:34_427adc898ab49c321d58ff4011fa54133adf62c2/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 819b1876e26fdd758df23050ecd02bd8d430c90c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_16:56:34_427adc898ab49c321d58ff4011fa54133adf62c2/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_16:56:34_427adc898ab49c321d58ff4011fa54133adf62c2/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-17_16:56:34_427adc898ab49c321d58ff4011fa54133adf62c2/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index d01360b8107a2f519e04f614093e1d8b205bc8ab..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_16:56:34_427adc898ab49c321d58ff4011fa54133adf62c2/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.980288,0.00347,1150.0 diff --git a/raw_results/2023-08-17_16:56:34_427adc898ab49c321d58ff4011fa54133adf62c2/pytorch_bert_inference/1/main.log b/raw_results/2023-08-17_16:56:34_427adc898ab49c321d58ff4011fa54133adf62c2/pytorch_bert_inference/1/main.log deleted file mode 100644 index 88fda71b6e28751b59ec492915d145bcaaa6314e..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-17_16:56:34_427adc898ab49c321d58ff4011fa54133adf62c2/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-17 18:50:01,233][benchmark][INFO] - Configuring inference benchmark -[2023-08-17 18:50:01,234][benchmark][INFO] - + Setting seed(42) -[2023-08-17 18:50:01,689][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-17 18:50:01,689][backend][INFO] - Configuring pytorch backend -[2023-08-17 18:50:01,689][backend][INFO] - + Checking initial device isolation -[2023-08-17 18:50:01,689][backend][INFO] - + Checking continuous device isolation -[2023-08-17 18:50:01,690][pytorch][INFO] - + Disabling gradients -[2023-08-17 18:50:01,690][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-17 18:50:01,833][pytorch][INFO] - + Turning on eval mode -[2023-08-17 18:50:01,833][inference][INFO] - Running inference benchmark -[2023-08-17 18:50:01,956][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-17 18:50:01,957][inference][INFO] - + Tracking forward pass peak memory -[2023-08-17 18:50:01,998][inference][INFO] - + Forward pass peak memory: 467.980288 (MB) -[2023-08-17 18:50:01,999][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-17 18:50:02,001][inference][INFO] - + Warming up the forward pass -[2023-08-17 18:50:02,037][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-17 18:50:07,082][inference][INFO] - + Forward pass latency: 3.47e-03 (s) -[2023-08-17 18:50:07,084][inference][INFO] - + Forward pass throughput: 1150.00 (samples/s) -[2023-08-17 18:50:07,084][inference][INFO] - Saving inference results -[2023-08-17 18:50:07,092][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-17_16:56:34_427adc898ab49c321d58ff4011fa54133adf62c2/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-17_16:56:34_427adc898ab49c321d58ff4011fa54133adf62c2/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 39258ef78f16622f3cbf970c22d0b92229e7cc16..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_16:56:34_427adc898ab49c321d58ff4011fa54133adf62c2/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: 
hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_16:56:34_427adc898ab49c321d58ff4011fa54133adf62c2/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-17_16:56:34_427adc898ab49c321d58ff4011fa54133adf62c2/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 47b2164ca5c8aee4386d0964f305ed9f9975c2d5..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_16:56:34_427adc898ab49c321d58ff4011fa54133adf62c2/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
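For context on the Hydra settings around this point: the run and sweep directories are interpolated from the COMMIT_DATE_GMT and COMMIT_SHA environment variables, and each sweep job writes into the ${hydra.job.num} subdirectory, which is why these deleted results live under raw_results/<date>_<sha>/<experiment_name>/<job-number>/. Below is a minimal sketch of how such a multirun could be relaunched; the "main.py" entry point is only an assumption inferred from hydra.job.name=main, and is not confirmed by these files:

    import os
    import subprocess

    # Env vars consumed by ${oc.env:COMMIT_DATE_GMT} and ${oc.env:COMMIT_SHA} in hydra.yaml.
    os.environ["COMMIT_DATE_GMT"] = "2023-08-17_16:56:34"
    os.environ["COMMIT_SHA"] = "427adc898ab49c321d58ff4011fa54133adf62c2"

    # -m requests MULTIRUN mode (matching "mode: MULTIRUN" recorded in hydra.yaml);
    # outputs land in sweeps/<date>_<sha>/<experiment_name>/<hydra.job.num>.
    subprocess.run(
        ["python", "main.py", "-m", "--config-name", "gpt2_cpu_inference"],
        check=True,
    )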
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-17_16:56:34_427adc898ab49c321d58ff4011fa54133adf62c2/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-17_16:56:34_427adc898ab49c321d58ff4011fa54133adf62c2/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-17_16:56:34_427adc898ab49c321d58ff4011fa54133adf62c2/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_16:56:34_427adc898ab49c321d58ff4011fa54133adf62c2/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-17_16:56:34_427adc898ab49c321d58ff4011fa54133adf62c2/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-17_16:56:34_427adc898ab49c321d58ff4011fa54133adf62c2/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index cb6bbe7e2787ba735853eaeafaa72ec86761d726..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_16:56:34_427adc898ab49c321d58ff4011fa54133adf62c2/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_16:56:34_427adc898ab49c321d58ff4011fa54133adf62c2/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-17_16:56:34_427adc898ab49c321d58ff4011fa54133adf62c2/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index dba7c847f68f09ac9fc05e3127ee10af0b4eb850..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_16:56:34_427adc898ab49c321d58ff4011fa54133adf62c2/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.27462399999996,0.00382,262.0,0.482,207.0 diff --git a/raw_results/2023-08-17_16:56:34_427adc898ab49c321d58ff4011fa54133adf62c2/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-17_16:56:34_427adc898ab49c321d58ff4011fa54133adf62c2/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 489cb63ba6281fcc8cfd07a4fb0353ef0893ff6b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_16:56:34_427adc898ab49c321d58ff4011fa54133adf62c2/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-17 18:50:11,836][benchmark][INFO] - Configuring inference benchmark -[2023-08-17 18:50:11,838][benchmark][INFO] - + Setting seed(42) -[2023-08-17 18:50:13,220][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-17 18:50:13,220][backend][INFO] - Configuring pytorch backend -[2023-08-17 18:50:13,220][backend][INFO] - + Checking initial device isolation -[2023-08-17 18:50:13,220][backend][INFO] - + Checking continuous device isolation -[2023-08-17 18:50:13,220][pytorch][INFO] - + Disabling gradients -[2023-08-17 18:50:13,221][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-17 18:50:13,849][pytorch][INFO] - + Turning on eval mode -[2023-08-17 18:50:13,850][inference][INFO] - Running inference benchmark -[2023-08-17 18:50:14,035][inference][INFO] - + Tracking forward pass peak memory -[2023-08-17 18:50:14,083][inference][INFO] - + Forward pass peak memory: 469.27462399999996 (MB) -[2023-08-17 18:50:14,085][inference][INFO] - + Warming up the
forward pass -[2023-08-17 18:50:14,117][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-17 18:50:19,163][inference][INFO] - + Forward pass latency: 3.82e-03 (s) -[2023-08-17 18:50:19,165][inference][INFO] - + Forward pass throughput: 262.00 (samples/s) -[2023-08-17 18:50:19,166][inference][INFO] - + Warming up the generation pass -[2023-08-17 18:50:19,654][inference][INFO] - + Tracking generation latency and throughput -[2023-08-17 18:50:24,954][inference][INFO] - + Generation pass latency: 4.82e-01 (s) -[2023-08-17 18:50:24,955][inference][INFO] - + Generation pass throughput: 207.00 (tokens/s) -[2023-08-17 18:50:24,955][inference][INFO] - Saving inference results -[2023-08-17 18:50:24,966][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-17_18:48:58_4a27c13f1eee26393d60d381e500e1a61970e8ee/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-17_18:48:58_4a27c13f1eee26393d60d381e500e1a61970e8ee/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index 8555d6744dd0e5881f14d8db6ba7f34fd0488b3b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_18:48:58_4a27c13f1eee26393d60d381e500e1a61970e8ee/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_18:48:58_4a27c13f1eee26393d60d381e500e1a61970e8ee/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-17_18:48:58_4a27c13f1eee26393d60d381e500e1a61970e8ee/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index cebd5ae1588602a077716b4be261055dbba8d39a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_18:48:58_4a27c13f1eee26393d60d381e500e1a61970e8ee/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-17_18:48:58_4a27c13f1eee26393d60d381e500e1a61970e8ee/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-17_18:48:58_4a27c13f1eee26393d60d381e500e1a61970e8ee/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-17_18:48:58_4a27c13f1eee26393d60d381e500e1a61970e8ee/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_18:48:58_4a27c13f1eee26393d60d381e500e1a61970e8ee/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-17_18:48:58_4a27c13f1eee26393d60d381e500e1a61970e8ee/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-17_18:48:58_4a27c13f1eee26393d60d381e500e1a61970e8ee/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 8748d22fe3c2d5758d4d82aa74516bd5a24d74c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_18:48:58_4a27c13f1eee26393d60d381e500e1a61970e8ee/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_18:48:58_4a27c13f1eee26393d60d381e500e1a61970e8ee/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-17_18:48:58_4a27c13f1eee26393d60d381e500e1a61970e8ee/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 27515425f243fcfa544385a4004e10f253c8b623..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_18:48:58_4a27c13f1eee26393d60d381e500e1a61970e8ee/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.18976,0.00375,267.0 diff --git a/raw_results/2023-08-17_18:48:58_4a27c13f1eee26393d60d381e500e1a61970e8ee/pytorch_bert_inference/0/main.log b/raw_results/2023-08-17_18:48:58_4a27c13f1eee26393d60d381e500e1a61970e8ee/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
be5b76290a5ddfc8342e28cc941e40835095c3b3..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_18:48:58_4a27c13f1eee26393d60d381e500e1a61970e8ee/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-17 20:49:42,397][benchmark][INFO] - Configuring inference benchmark -[2023-08-17 20:49:42,398][benchmark][INFO] - + Setting seed(42) -[2023-08-17 20:49:43,651][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-17 20:49:43,652][backend][INFO] - Configuring pytorch backend -[2023-08-17 20:49:43,652][backend][INFO] - + Checking initial device isolation -[2023-08-17 20:49:43,652][backend][INFO] - + Checking continuous device isolation -[2023-08-17 20:49:43,652][pytorch][INFO] - + Disabling gradients -[2023-08-17 20:49:43,653][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-17 20:49:44,341][pytorch][INFO] - + Turning on eval mode -[2023-08-17 20:49:44,341][inference][INFO] - Running inference benchmark -[2023-08-17 20:49:44,461][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-17 20:49:44,463][inference][INFO] - + Tracking forward pass peak memory -[2023-08-17 20:49:44,520][inference][INFO] - + Forward pass peak memory: 467.18976 (MB) -[2023-08-17 20:49:44,521][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-17 20:49:44,523][inference][INFO] - + Warming up the forward pass -[2023-08-17 20:49:44,560][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-17 20:49:49,606][inference][INFO] - + Forward pass latency: 3.75e-03 (s) -[2023-08-17 20:49:49,608][inference][INFO] - + Forward pass throughput: 267.00 (samples/s) -[2023-08-17 20:49:49,608][inference][INFO] - Saving inference results -[2023-08-17 20:49:49,619][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-17_18:48:58_4a27c13f1eee26393d60d381e500e1a61970e8ee/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-17_18:48:58_4a27c13f1eee26393d60d381e500e1a61970e8ee/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index fc02352dc97d220d55fe04bc4edef4200110f22a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_18:48:58_4a27c13f1eee26393d60d381e500e1a61970e8ee/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_18:48:58_4a27c13f1eee26393d60d381e500e1a61970e8ee/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-17_18:48:58_4a27c13f1eee26393d60d381e500e1a61970e8ee/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 02894487d96ebf0adf53ab047dc93dcb4e5eb545..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_18:48:58_4a27c13f1eee26393d60d381e500e1a61970e8ee/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
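A note on the numbers these sweeps produce: the sweeper block above expands benchmark.input_shapes.batch_size: 1,4 into jobs 0 and 1, and the forward.throughput(samples/s) column in each inference_results.csv is, up to the rounding of the displayed latency, just batch_size / forward.latency(s). A quick sanity check against the two CSVs for commit 4a27c13 (batch 1 above, batch 4 below):

    # batch_size -> forward.latency(s), copied from the inference_results.csv files
    latencies = {1: 0.00375, 4: 0.00426}
    for batch_size, latency in latencies.items():
        # prints 267 and 939, matching the recorded forward.throughput values
        print(batch_size, round(batch_size / latency))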
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-17_18:48:58_4a27c13f1eee26393d60d381e500e1a61970e8ee/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-17_18:48:58_4a27c13f1eee26393d60d381e500e1a61970e8ee/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-17_18:48:58_4a27c13f1eee26393d60d381e500e1a61970e8ee/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_18:48:58_4a27c13f1eee26393d60d381e500e1a61970e8ee/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-17_18:48:58_4a27c13f1eee26393d60d381e500e1a61970e8ee/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-17_18:48:58_4a27c13f1eee26393d60d381e500e1a61970e8ee/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 819b1876e26fdd758df23050ecd02bd8d430c90c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_18:48:58_4a27c13f1eee26393d60d381e500e1a61970e8ee/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_18:48:58_4a27c13f1eee26393d60d381e500e1a61970e8ee/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-17_18:48:58_4a27c13f1eee26393d60d381e500e1a61970e8ee/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 91b8a7d7254c2e4709a8b0fd240f78179dd02a03..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_18:48:58_4a27c13f1eee26393d60d381e500e1a61970e8ee/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.25062399999996,0.00426,939.0 diff --git a/raw_results/2023-08-17_18:48:58_4a27c13f1eee26393d60d381e500e1a61970e8ee/pytorch_bert_inference/1/main.log b/raw_results/2023-08-17_18:48:58_4a27c13f1eee26393d60d381e500e1a61970e8ee/pytorch_bert_inference/1/main.log deleted file mode 100644 index bd3c86f5a8499c90da6647b1d2471072d5436763..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_18:48:58_4a27c13f1eee26393d60d381e500e1a61970e8ee/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-17 20:49:50,132][benchmark][INFO] - Configuring inference benchmark -[2023-08-17 20:49:50,133][benchmark][INFO] - + Setting seed(42) -[2023-08-17 20:49:50,575][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-17 20:49:50,575][backend][INFO] - Configuring pytorch backend -[2023-08-17 20:49:50,575][backend][INFO] - + Checking initial device isolation -[2023-08-17 20:49:50,575][backend][INFO] - + Checking continuous device isolation -[2023-08-17 20:49:50,575][pytorch][INFO] - + Disabling gradients -[2023-08-17 20:49:50,576][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-17 20:49:50,694][pytorch][INFO] - + Turning on eval mode -[2023-08-17 20:49:50,695][inference][INFO] - Running inference benchmark -[2023-08-17 20:49:50,817][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-17 20:49:50,819][inference][INFO] - + Tracking forward
pass peak memory -[2023-08-17 20:49:50,863][inference][INFO] - + Forward pass peak memory: 468.25062399999996 (MB) -[2023-08-17 20:49:50,863][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-17 20:49:50,865][inference][INFO] - + Warming up the forward pass -[2023-08-17 20:49:50,908][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-17 20:49:55,948][inference][INFO] - + Forward pass latency: 4.26e-03 (s) -[2023-08-17 20:49:55,950][inference][INFO] - + Forward pass throughput: 939.00 (samples/s) -[2023-08-17 20:49:55,950][inference][INFO] - Saving inference results -[2023-08-17 20:49:55,957][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-17_18:48:58_4a27c13f1eee26393d60d381e500e1a61970e8ee/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-17_18:48:58_4a27c13f1eee26393d60d381e500e1a61970e8ee/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 39258ef78f16622f3cbf970c22d0b92229e7cc16..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_18:48:58_4a27c13f1eee26393d60d381e500e1a61970e8ee/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_18:48:58_4a27c13f1eee26393d60d381e500e1a61970e8ee/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-17_18:48:58_4a27c13f1eee26393d60d381e500e1a61970e8ee/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 1c166b6d5a4347f59969186eba117752af1a3500..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_18:48:58_4a27c13f1eee26393d60d381e500e1a61970e8ee/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - 
launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-17_18:48:58_4a27c13f1eee26393d60d381e500e1a61970e8ee/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-17_18:48:58_4a27c13f1eee26393d60d381e500e1a61970e8ee/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-17_18:48:58_4a27c13f1eee26393d60d381e500e1a61970e8ee/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_18:48:58_4a27c13f1eee26393d60d381e500e1a61970e8ee/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-17_18:48:58_4a27c13f1eee26393d60d381e500e1a61970e8ee/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-17_18:48:58_4a27c13f1eee26393d60d381e500e1a61970e8ee/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index cb6bbe7e2787ba735853eaeafaa72ec86761d726..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_18:48:58_4a27c13f1eee26393d60d381e500e1a61970e8ee/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-17_18:48:58_4a27c13f1eee26393d60d381e500e1a61970e8ee/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-17_18:48:58_4a27c13f1eee26393d60d381e500e1a61970e8ee/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 89a7863fe180fd8b76d546b837edddc39d344108..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-17_18:48:58_4a27c13f1eee26393d60d381e500e1a61970e8ee/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.254144,0.00404,248.0,0.48,208.0 diff --git a/raw_results/2023-08-17_18:48:58_4a27c13f1eee26393d60d381e500e1a61970e8ee/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-17_18:48:58_4a27c13f1eee26393d60d381e500e1a61970e8ee/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 8f7a7806d3437ab2b8a2192dd8c85eff85d42f5c..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-17_18:48:58_4a27c13f1eee26393d60d381e500e1a61970e8ee/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-17 20:50:00,691][benchmark][INFO] - Configuring inference benchmark -[2023-08-17 20:50:00,691][benchmark][INFO] - + Setting seed(42) -[2023-08-17 20:50:02,196][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-17 20:50:02,197][backend][INFO] - Configuring pytorch backend -[2023-08-17 20:50:02,197][backend][INFO] - + Checking initial device isolation -[2023-08-17 20:50:02,197][backend][INFO] - + Checking continuous device isolation -[2023-08-17 20:50:02,197][pytorch][INFO] - + Disabling gradients -[2023-08-17 20:50:02,198][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-17 20:50:03,144][pytorch][INFO] - + Turning on eval mode -[2023-08-17 20:50:03,145][inference][INFO] - Running inference benchmark -[2023-08-17 20:50:03,339][inference][INFO] - + Tracking forward pass peak memory -[2023-08-17 20:50:03,387][inference][INFO] - + Forward pass peak memory: 469.254144 (MB) -[2023-08-17 20:50:03,388][inference][INFO] - + Warming up the forward pass -[2023-08-17 20:50:03,421][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-17 20:50:08,465][inference][INFO] - + Forward pass latency: 4.04e-03 (s) -[2023-08-17 20:50:08,467][inference][INFO] - + Forward pass throughput: 248.00 (samples/s) -[2023-08-17 20:50:08,467][inference][INFO] - + Warming up the generation pass -[2023-08-17 20:50:08,960][inference][INFO] - + Tracking generation latency and throughput -[2023-08-17 20:50:14,245][inference][INFO] - + Generation pass latency: 4.80e-01 (s) -[2023-08-17 20:50:14,246][inference][INFO] - + Generation pass throughput: 208.00 (tokens/s) -[2023-08-17 20:50:14,246][inference][INFO] - Saving inference results -[2023-08-17 20:50:14,258][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-18_05:58:59_659ab0423e6492b079d3df131445a39dda0651cb/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-18_05:58:59_659ab0423e6492b079d3df131445a39dda0651cb/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index 8555d6744dd0e5881f14d8db6ba7f34fd0488b3b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_05:58:59_659ab0423e6492b079d3df131445a39dda0651cb/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_05:58:59_659ab0423e6492b079d3df131445a39dda0651cb/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-18_05:58:59_659ab0423e6492b079d3df131445a39dda0651cb/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index ee42ce1af80434199796ec6c58e7ede9afad88dd..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_05:58:59_659ab0423e6492b079d3df131445a39dda0651cb/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-18_05:58:59_659ab0423e6492b079d3df131445a39dda0651cb/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-18_05:58:59_659ab0423e6492b079d3df131445a39dda0651cb/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-18_05:58:59_659ab0423e6492b079d3df131445a39dda0651cb/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_05:58:59_659ab0423e6492b079d3df131445a39dda0651cb/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-18_05:58:59_659ab0423e6492b079d3df131445a39dda0651cb/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-18_05:58:59_659ab0423e6492b079d3df131445a39dda0651cb/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 8748d22fe3c2d5758d4d82aa74516bd5a24d74c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_05:58:59_659ab0423e6492b079d3df131445a39dda0651cb/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_05:58:59_659ab0423e6492b079d3df131445a39dda0651cb/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-18_05:58:59_659ab0423e6492b079d3df131445a39dda0651cb/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index bee431ccf34a4f690b7e9a0ad7eaf052fb825c59..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_05:58:59_659ab0423e6492b079d3df131445a39dda0651cb/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.39104,0.00314,318.0 diff --git a/raw_results/2023-08-18_05:58:59_659ab0423e6492b079d3df131445a39dda0651cb/pytorch_bert_inference/0/main.log b/raw_results/2023-08-18_05:58:59_659ab0423e6492b079d3df131445a39dda0651cb/pytorch_bert_inference/0/main.log deleted file mode 100644 index a1d44a21e79b7f24d31a7385a6e8462834a2e3a0..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_05:58:59_659ab0423e6492b079d3df131445a39dda0651cb/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-18 06:49:41,138][benchmark][INFO] - Configuring inference benchmark -[2023-08-18 06:49:41,139][benchmark][INFO] - + Setting seed(42) -[2023-08-18 06:49:42,412][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-18 06:49:42,412][backend][INFO] - Configuring pytorch backend -[2023-08-18 06:49:42,412][backend][INFO] - + Checking initial device isolation -[2023-08-18 06:49:42,412][backend][INFO] - + Checking contineous device isolation -[2023-08-18 06:49:42,412][pytorch][INFO] - + Disabling gradients -[2023-08-18 06:49:42,413][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-18 06:49:43,042][pytorch][INFO] - + Turning on eval mode -[2023-08-18 06:49:43,043][inference][INFO] - Running inference benchmark -[2023-08-18 06:49:43,166][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-18 06:49:43,168][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-18 06:49:43,225][inference][INFO] - + Forward pass peak memory: 466.39104 (MB) -[2023-08-18 06:49:43,226][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-18 06:49:43,228][inference][INFO] - + Warming up the forward pass -[2023-08-18 06:49:43,265][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-18 06:49:48,315][inference][INFO] - + Forward pass latency: 3.14e-03 (s) -[2023-08-18 06:49:48,317][inference][INFO] - + Forward pass throughput: 318.00 (samples/s) -[2023-08-18 06:49:48,317][inference][INFO] - Saving inference results -[2023-08-18 06:49:48,328][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-18_05:58:59_659ab0423e6492b079d3df131445a39dda0651cb/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-18_05:58:59_659ab0423e6492b079d3df131445a39dda0651cb/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index fc02352dc97d220d55fe04bc4edef4200110f22a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_05:58:59_659ab0423e6492b079d3df131445a39dda0651cb/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_05:58:59_659ab0423e6492b079d3df131445a39dda0651cb/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-18_05:58:59_659ab0423e6492b079d3df131445a39dda0651cb/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 23ca4422003bcf5d318c343b1974c9d3df3307a9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_05:58:59_659ab0423e6492b079d3df131445a39dda0651cb/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-18_05:58:59_659ab0423e6492b079d3df131445a39dda0651cb/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-18_05:58:59_659ab0423e6492b079d3df131445a39dda0651cb/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-08-18_05:58:59_659ab0423e6492b079d3df131445a39dda0651cb/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_05:58:59_659ab0423e6492b079d3df131445a39dda0651cb/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-18_05:58:59_659ab0423e6492b079d3df131445a39dda0651cb/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-18_05:58:59_659ab0423e6492b079d3df131445a39dda0651cb/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 819b1876e26fdd758df23050ecd02bd8d430c90c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_05:58:59_659ab0423e6492b079d3df131445a39dda0651cb/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_05:58:59_659ab0423e6492b079d3df131445a39dda0651cb/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-18_05:58:59_659ab0423e6492b079d3df131445a39dda0651cb/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 29fc67366da26a90725d7be56383f2f36cdfc068..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_05:58:59_659ab0423e6492b079d3df131445a39dda0651cb/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.49696,0.00348,1150.0 diff --git a/raw_results/2023-08-18_05:58:59_659ab0423e6492b079d3df131445a39dda0651cb/pytorch_bert_inference/1/main.log b/raw_results/2023-08-18_05:58:59_659ab0423e6492b079d3df131445a39dda0651cb/pytorch_bert_inference/1/main.log deleted file mode 100644 index 4e75da1e26f8e5d2eef9361f69b90f17708fb080..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-18_05:58:59_659ab0423e6492b079d3df131445a39dda0651cb/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-18 06:49:48,732][benchmark][INFO] - Configuring inference benchmark -[2023-08-18 06:49:48,733][benchmark][INFO] - + Setting seed(42) -[2023-08-18 06:49:49,180][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-18 06:49:49,181][backend][INFO] - Configuring pytorch backend -[2023-08-18 06:49:49,181][backend][INFO] - + Checking initial device isolation -[2023-08-18 06:49:49,181][backend][INFO] - + Checking contineous device isolation -[2023-08-18 06:49:49,181][pytorch][INFO] - + Disabling gradients -[2023-08-18 06:49:49,181][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-18 06:49:49,299][pytorch][INFO] - + Turning on eval mode -[2023-08-18 06:49:49,299][inference][INFO] - Running inference benchmark -[2023-08-18 06:49:49,417][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-18 06:49:49,418][inference][INFO] - + Tracking forward pass peak memory -[2023-08-18 06:49:49,461][inference][INFO] - + Forward pass peak memory: 467.49696 (MB) -[2023-08-18 06:49:49,462][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-18 06:49:49,464][inference][INFO] - + Warming up the forward pass -[2023-08-18 06:49:49,500][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-18 06:49:54,546][inference][INFO] - + Forward pass latency: 3.48e-03 (s) -[2023-08-18 06:49:54,547][inference][INFO] - + Forward pass throughput: 1150.00 (samples/s) -[2023-08-18 06:49:54,547][inference][INFO] - Saving inference results -[2023-08-18 06:49:54,554][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-18_05:58:59_659ab0423e6492b079d3df131445a39dda0651cb/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-18_05:58:59_659ab0423e6492b079d3df131445a39dda0651cb/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 39258ef78f16622f3cbf970c22d0b92229e7cc16..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_05:58:59_659ab0423e6492b079d3df131445a39dda0651cb/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: 
hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_05:58:59_659ab0423e6492b079d3df131445a39dda0651cb/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-18_05:58:59_659ab0423e6492b079d3df131445a39dda0651cb/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 70cb4f2359a940c635a3609a19bab05e2e94a067..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_05:58:59_659ab0423e6492b079d3df131445a39dda0651cb/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-18_05:58:59_659ab0423e6492b079d3df131445a39dda0651cb/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-18_05:58:59_659ab0423e6492b079d3df131445a39dda0651cb/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-18_05:58:59_659ab0423e6492b079d3df131445a39dda0651cb/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_05:58:59_659ab0423e6492b079d3df131445a39dda0651cb/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-18_05:58:59_659ab0423e6492b079d3df131445a39dda0651cb/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-18_05:58:59_659ab0423e6492b079d3df131445a39dda0651cb/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index cb6bbe7e2787ba735853eaeafaa72ec86761d726..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_05:58:59_659ab0423e6492b079d3df131445a39dda0651cb/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_05:58:59_659ab0423e6492b079d3df131445a39dda0651cb/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-18_05:58:59_659ab0423e6492b079d3df131445a39dda0651cb/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 93f4158f291865f0512473b5640d1157773249b0..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_05:58:59_659ab0423e6492b079d3df131445a39dda0651cb/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,468.688896,0.00315,317.0,0.482,207.0 diff --git a/raw_results/2023-08-18_05:58:59_659ab0423e6492b079d3df131445a39dda0651cb/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-18_05:58:59_659ab0423e6492b079d3df131445a39dda0651cb/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 8ab758f5476c41751e2396d9f8434b67e3326b9b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_05:58:59_659ab0423e6492b079d3df131445a39dda0651cb/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-18 06:49:59,293][benchmark][INFO] - Configuring inference benchmark -[2023-08-18 06:49:59,294][benchmark][INFO] - + Setting seed(42) -[2023-08-18 06:50:00,728][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-18 06:50:00,729][backend][INFO] - Configuring pytorch backend -[2023-08-18 06:50:00,729][backend][INFO] - + Checking initial device isolation -[2023-08-18 06:50:00,729][backend][INFO] - + Checking contineous device isolation -[2023-08-18 06:50:00,729][pytorch][INFO] - + Disabling gradients -[2023-08-18 06:50:00,729][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-18 06:50:01,407][pytorch][INFO] - + Turning on eval mode -[2023-08-18 06:50:01,408][inference][INFO] - Running inference benchmark -[2023-08-18 06:50:01,614][inference][INFO] - + Tracking forward pass peak memory -[2023-08-18 06:50:01,663][inference][INFO] - + Forward pass peak memory: 468.688896 (MB) -[2023-08-18 06:50:01,664][inference][INFO] - + Warming up the forward pass 
-[2023-08-18 06:50:01,699][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-18 06:50:06,754][inference][INFO] - + Forward pass latency: 3.15e-03 (s) -[2023-08-18 06:50:06,755][inference][INFO] - + Forward pass throughput: 317.00 (samples/s) -[2023-08-18 06:50:06,756][inference][INFO] - + Warming up the generation pass -[2023-08-18 06:50:07,247][inference][INFO] - + Tracking generation latency and throughput -[2023-08-18 06:50:12,554][inference][INFO] - + Generation pass latency: 4.82e-01 (s) -[2023-08-18 06:50:12,555][inference][INFO] - + Generation pass throughput: 207.00 (tokens/s) -[2023-08-18 06:50:12,555][inference][INFO] - Saving inference results -[2023-08-18 06:50:12,566][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-18_08:17:44_08e32519f8eeeb2e08a0ed8626a5f97b766bb2e8/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-18_08:17:44_08e32519f8eeeb2e08a0ed8626a5f97b766bb2e8/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index 8555d6744dd0e5881f14d8db6ba7f34fd0488b3b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_08:17:44_08e32519f8eeeb2e08a0ed8626a5f97b766bb2e8/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_08:17:44_08e32519f8eeeb2e08a0ed8626a5f97b766bb2e8/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-18_08:17:44_08e32519f8eeeb2e08a0ed8626a5f97b766bb2e8/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 1598fd65fb69334d94f1ea3ae2f45fe8d279bd0f..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_08:17:44_08e32519f8eeeb2e08a0ed8626a5f97b766bb2e8/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-18_08:17:44_08e32519f8eeeb2e08a0ed8626a5f97b766bb2e8/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-18_08:17:44_08e32519f8eeeb2e08a0ed8626a5f97b766bb2e8/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-18_08:17:44_08e32519f8eeeb2e08a0ed8626a5f97b766bb2e8/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_08:17:44_08e32519f8eeeb2e08a0ed8626a5f97b766bb2e8/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-18_08:17:44_08e32519f8eeeb2e08a0ed8626a5f97b766bb2e8/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-18_08:17:44_08e32519f8eeeb2e08a0ed8626a5f97b766bb2e8/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 8748d22fe3c2d5758d4d82aa74516bd5a24d74c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_08:17:44_08e32519f8eeeb2e08a0ed8626a5f97b766bb2e8/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_08:17:44_08e32519f8eeeb2e08a0ed8626a5f97b766bb2e8/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-18_08:17:44_08e32519f8eeeb2e08a0ed8626a5f97b766bb2e8/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index e48723d4d5eaabb63cb114ff2679a74423912e59..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_08:17:44_08e32519f8eeeb2e08a0ed8626a5f97b766bb2e8/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.97152,0.00316,316.0 diff --git a/raw_results/2023-08-18_08:17:44_08e32519f8eeeb2e08a0ed8626a5f97b766bb2e8/pytorch_bert_inference/0/main.log b/raw_results/2023-08-18_08:17:44_08e32519f8eeeb2e08a0ed8626a5f97b766bb2e8/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
7d9d70a01d7a636c643202cbc54652f26fae968c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_08:17:44_08e32519f8eeeb2e08a0ed8626a5f97b766bb2e8/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-18 08:49:59,789][benchmark][INFO] - Configuring inference benchmark -[2023-08-18 08:49:59,790][benchmark][INFO] - + Setting seed(42) -[2023-08-18 08:50:01,022][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-18 08:50:01,022][backend][INFO] - Configuring pytorch backend -[2023-08-18 08:50:01,023][backend][INFO] - + Checking initial device isolation -[2023-08-18 08:50:01,023][backend][INFO] - + Checking contineous device isolation -[2023-08-18 08:50:01,023][pytorch][INFO] - + Disabling gradients -[2023-08-18 08:50:01,023][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-18 08:50:01,645][pytorch][INFO] - + Turning on eval mode -[2023-08-18 08:50:01,645][inference][INFO] - Running inference benchmark -[2023-08-18 08:50:01,759][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-18 08:50:01,760][inference][INFO] - + Tracking forward pass peak memory -[2023-08-18 08:50:01,818][inference][INFO] - + Forward pass peak memory: 468.97152 (MB) -[2023-08-18 08:50:01,819][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-18 08:50:01,823][inference][INFO] - + Warming up the forward pass -[2023-08-18 08:50:01,870][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-18 08:50:06,923][inference][INFO] - + Forward pass latency: 3.16e-03 (s) -[2023-08-18 08:50:06,924][inference][INFO] - + Forward pass throughput: 316.00 (samples/s) -[2023-08-18 08:50:06,924][inference][INFO] - Saving inference results -[2023-08-18 08:50:06,934][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-18_08:17:44_08e32519f8eeeb2e08a0ed8626a5f97b766bb2e8/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-18_08:17:44_08e32519f8eeeb2e08a0ed8626a5f97b766bb2e8/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index fc02352dc97d220d55fe04bc4edef4200110f22a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_08:17:44_08e32519f8eeeb2e08a0ed8626a5f97b766bb2e8/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_08:17:44_08e32519f8eeeb2e08a0ed8626a5f97b766bb2e8/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-18_08:17:44_08e32519f8eeeb2e08a0ed8626a5f97b766bb2e8/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 2d4f2354b3c0fb3945ab9dd37944567001146e0a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_08:17:44_08e32519f8eeeb2e08a0ed8626a5f97b766bb2e8/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-18_08:17:44_08e32519f8eeeb2e08a0ed8626a5f97b766bb2e8/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-18_08:17:44_08e32519f8eeeb2e08a0ed8626a5f97b766bb2e8/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-18_08:17:44_08e32519f8eeeb2e08a0ed8626a5f97b766bb2e8/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_08:17:44_08e32519f8eeeb2e08a0ed8626a5f97b766bb2e8/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-18_08:17:44_08e32519f8eeeb2e08a0ed8626a5f97b766bb2e8/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-18_08:17:44_08e32519f8eeeb2e08a0ed8626a5f97b766bb2e8/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 819b1876e26fdd758df23050ecd02bd8d430c90c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_08:17:44_08e32519f8eeeb2e08a0ed8626a5f97b766bb2e8/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_08:17:44_08e32519f8eeeb2e08a0ed8626a5f97b766bb2e8/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-18_08:17:44_08e32519f8eeeb2e08a0ed8626a5f97b766bb2e8/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 3d9c99181f2882a40da700288dbd86abbd3ba8d3..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_08:17:44_08e32519f8eeeb2e08a0ed8626a5f97b766bb2e8/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,470.102016,0.00339,1180.0 diff --git a/raw_results/2023-08-18_08:17:44_08e32519f8eeeb2e08a0ed8626a5f97b766bb2e8/pytorch_bert_inference/1/main.log b/raw_results/2023-08-18_08:17:44_08e32519f8eeeb2e08a0ed8626a5f97b766bb2e8/pytorch_bert_inference/1/main.log deleted file mode 100644 index ec8f611f8d9176a118b4fc4ff4b0e00c1ca7adff..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_08:17:44_08e32519f8eeeb2e08a0ed8626a5f97b766bb2e8/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-18 08:50:07,309][benchmark][INFO] - Configuring inference benchmark -[2023-08-18 08:50:07,310][benchmark][INFO] - + Setting seed(42) -[2023-08-18 08:50:07,737][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-18 08:50:07,737][backend][INFO] - Configuring pytorch backend -[2023-08-18 08:50:07,737][backend][INFO] - + Checking initial device isolation -[2023-08-18 08:50:07,737][backend][INFO] - + Checking contineous device isolation -[2023-08-18 08:50:07,738][pytorch][INFO] - + Disabling gradients -[2023-08-18 08:50:07,738][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-18 08:50:07,846][pytorch][INFO] - + Turning on eval mode -[2023-08-18 08:50:07,847][inference][INFO] - Running inference benchmark -[2023-08-18 08:50:07,976][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-18 08:50:07,977][inference][INFO] - + Tracking forward pass 
peak memory -[2023-08-18 08:50:08,019][inference][INFO] - + Forward pass peak memory: 470.102016 (MB) -[2023-08-18 08:50:08,020][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-18 08:50:08,022][inference][INFO] - + Warming up the forward pass -[2023-08-18 08:50:08,057][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-18 08:50:13,105][inference][INFO] - + Forward pass latency: 3.39e-03 (s) -[2023-08-18 08:50:13,106][inference][INFO] - + Forward pass throughput: 1180.00 (samples/s) -[2023-08-18 08:50:13,106][inference][INFO] - Saving inference results -[2023-08-18 08:50:13,114][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-18_08:17:44_08e32519f8eeeb2e08a0ed8626a5f97b766bb2e8/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-18_08:17:44_08e32519f8eeeb2e08a0ed8626a5f97b766bb2e8/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 39258ef78f16622f3cbf970c22d0b92229e7cc16..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_08:17:44_08e32519f8eeeb2e08a0ed8626a5f97b766bb2e8/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_08:17:44_08e32519f8eeeb2e08a0ed8626a5f97b766bb2e8/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-18_08:17:44_08e32519f8eeeb2e08a0ed8626a5f97b766bb2e8/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 7d09e87b16eae503ad03e0d623e8346a6563f192..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_08:17:44_08e32519f8eeeb2e08a0ed8626a5f97b766bb2e8/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-18_08:17:44_08e32519f8eeeb2e08a0ed8626a5f97b766bb2e8/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-18_08:17:44_08e32519f8eeeb2e08a0ed8626a5f97b766bb2e8/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-18_08:17:44_08e32519f8eeeb2e08a0ed8626a5f97b766bb2e8/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_08:17:44_08e32519f8eeeb2e08a0ed8626a5f97b766bb2e8/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-18_08:17:44_08e32519f8eeeb2e08a0ed8626a5f97b766bb2e8/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-18_08:17:44_08e32519f8eeeb2e08a0ed8626a5f97b766bb2e8/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index cb6bbe7e2787ba735853eaeafaa72ec86761d726..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_08:17:44_08e32519f8eeeb2e08a0ed8626a5f97b766bb2e8/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_08:17:44_08e32519f8eeeb2e08a0ed8626a5f97b766bb2e8/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-18_08:17:44_08e32519f8eeeb2e08a0ed8626a5f97b766bb2e8/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 1c9e08cbeedb34ce88d1c60115dbe7497201b52b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_08:17:44_08e32519f8eeeb2e08a0ed8626a5f97b766bb2e8/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.17632,0.00372,269.0,0.497,201.0 diff --git a/raw_results/2023-08-18_08:17:44_08e32519f8eeeb2e08a0ed8626a5f97b766bb2e8/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-18_08:17:44_08e32519f8eeeb2e08a0ed8626a5f97b766bb2e8/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index ccf9a7541e2bcbdbad2aad5e6ab3e1adf781e09d..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-18_08:17:44_08e32519f8eeeb2e08a0ed8626a5f97b766bb2e8/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-18 08:50:17,933][benchmark][INFO] - Configuring inference benchmark -[2023-08-18 08:50:17,934][benchmark][INFO] - + Setting seed(42) -[2023-08-18 08:50:19,341][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-18 08:50:19,341][backend][INFO] - Configuring pytorch backend -[2023-08-18 08:50:19,342][backend][INFO] - + Checking initial device isolation -[2023-08-18 08:50:19,342][backend][INFO] - + Checking contineous device isolation -[2023-08-18 08:50:19,342][pytorch][INFO] - + Disabling gradients -[2023-08-18 08:50:19,342][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-18 08:50:20,008][pytorch][INFO] - + Turning on eval mode -[2023-08-18 08:50:20,009][inference][INFO] - Running inference benchmark -[2023-08-18 08:50:20,209][inference][INFO] - + Tracking forward pass peak memory -[2023-08-18 08:50:20,257][inference][INFO] - + Forward pass peak memory: 469.17632 (MB) -[2023-08-18 08:50:20,258][inference][INFO] - + Warming up the forward pass -[2023-08-18 08:50:20,290][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-18 08:50:25,338][inference][INFO] - + Forward pass latency: 3.72e-03 (s) -[2023-08-18 08:50:25,340][inference][INFO] - + Forward pass throughput: 269.00 (samples/s) -[2023-08-18 08:50:25,340][inference][INFO] - + Warming up the generation pass -[2023-08-18 08:50:25,834][inference][INFO] - + Tracking generation latency and throughput -[2023-08-18 08:50:31,300][inference][INFO] - + Generation pass latency: 4.97e-01 (s) -[2023-08-18 08:50:31,301][inference][INFO] - + Generation pass throughput: 201.00 (tokens/s) -[2023-08-18 08:50:31,301][inference][INFO] - Saving inference results -[2023-08-18 08:50:31,313][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-18_08:32:28_940d1a76b0f2ebf98b18326bffc7e4bfc8c416d7/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-18_08:32:28_940d1a76b0f2ebf98b18326bffc7e4bfc8c416d7/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index 8555d6744dd0e5881f14d8db6ba7f34fd0488b3b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_08:32:28_940d1a76b0f2ebf98b18326bffc7e4bfc8c416d7/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 
16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_08:32:28_940d1a76b0f2ebf98b18326bffc7e4bfc8c416d7/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-18_08:32:28_940d1a76b0f2ebf98b18326bffc7e4bfc8c416d7/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 63e80ba7be15d6ddd553e231f85ce735de9d114d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_08:32:28_940d1a76b0f2ebf98b18326bffc7e4bfc8c416d7/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
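
The sweeper params above (benchmark.input_shapes.batch_size: 1,4) are what produce the numbered 0/ and 1/ result directories in these raw_results: Hydra's BasicSweeper splits each comma-separated value list and launches one job per combination, recording the chosen override in overrides.yaml and the job index in hydra.job.num. A rough Python sketch of that expansion — illustrative only, not Hydra's actual implementation:

    from itertools import product

    # Simplified stand-in for hydra._internal.core_plugins.basic_sweeper.BasicSweeper:
    # comma-separated override values expand into one job per combination.
    params = {"benchmark.input_shapes.batch_size": "1,4"}

    choices = [[(key, v) for v in spec.split(",")] for key, spec in params.items()]
    for num, combo in enumerate(product(*choices)):
        overrides = [f"{key}={value}" for key, value in combo]
        # Each job lands in sweeps/<commit>/<experiment_name>/<num>,
        # matching the hydra.job.num subdirectories seen in these configs.
        print(num, overrides)

Running it prints job 0 with batch_size=1 and job 1 with batch_size=4, matching the two bert runs recorded here.
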
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-18_08:32:28_940d1a76b0f2ebf98b18326bffc7e4bfc8c416d7/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-18_08:32:28_940d1a76b0f2ebf98b18326bffc7e4bfc8c416d7/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-18_08:32:28_940d1a76b0f2ebf98b18326bffc7e4bfc8c416d7/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_08:32:28_940d1a76b0f2ebf98b18326bffc7e4bfc8c416d7/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-18_08:32:28_940d1a76b0f2ebf98b18326bffc7e4bfc8c416d7/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-18_08:32:28_940d1a76b0f2ebf98b18326bffc7e4bfc8c416d7/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 8748d22fe3c2d5758d4d82aa74516bd5a24d74c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_08:32:28_940d1a76b0f2ebf98b18326bffc7e4bfc8c416d7/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_08:32:28_940d1a76b0f2ebf98b18326bffc7e4bfc8c416d7/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-18_08:32:28_940d1a76b0f2ebf98b18326bffc7e4bfc8c416d7/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 8fefbf9b0a36f64b4749581066e7486e20a0e3b1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_08:32:28_940d1a76b0f2ebf98b18326bffc7e4bfc8c416d7/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.075648,0.00323,310.0 diff --git a/raw_results/2023-08-18_08:32:28_940d1a76b0f2ebf98b18326bffc7e4bfc8c416d7/pytorch_bert_inference/0/main.log b/raw_results/2023-08-18_08:32:28_940d1a76b0f2ebf98b18326bffc7e4bfc8c416d7/pytorch_bert_inference/0/main.log deleted file mode 100644 index fab1eaaeb0da5dcfc485b9dadef1fee914de5332..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_08:32:28_940d1a76b0f2ebf98b18326bffc7e4bfc8c416d7/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-18 08:51:34,125][benchmark][INFO] - Configuring inference benchmark -[2023-08-18 08:51:34,126][benchmark][INFO] - + Setting seed(42) -[2023-08-18 08:51:35,598][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-18 08:51:35,598][backend][INFO] - Configuring pytorch backend -[2023-08-18 08:51:35,598][backend][INFO] - + Checking initial device isolation -[2023-08-18 08:51:35,598][backend][INFO] - + Checking contineous device isolation -[2023-08-18 08:51:35,599][pytorch][INFO] - + Disabling gradients -[2023-08-18 08:51:35,599][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-18 08:51:36,229][pytorch][INFO] - + Turning on eval mode -[2023-08-18 08:51:36,229][inference][INFO] - Running inference benchmark -[2023-08-18 08:51:36,350][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-18 08:51:36,352][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-18 08:51:36,410][inference][INFO] - + Forward pass peak memory: 466.075648 (MB) -[2023-08-18 08:51:36,412][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-18 08:51:36,413][inference][INFO] - + Warming up the forward pass -[2023-08-18 08:51:36,446][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-18 08:51:41,496][inference][INFO] - + Forward pass latency: 3.23e-03 (s) -[2023-08-18 08:51:41,498][inference][INFO] - + Forward pass throughput: 310.00 (samples/s) -[2023-08-18 08:51:41,498][inference][INFO] - Saving inference results -[2023-08-18 08:51:41,511][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-18_08:32:28_940d1a76b0f2ebf98b18326bffc7e4bfc8c416d7/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-18_08:32:28_940d1a76b0f2ebf98b18326bffc7e4bfc8c416d7/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index fc02352dc97d220d55fe04bc4edef4200110f22a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_08:32:28_940d1a76b0f2ebf98b18326bffc7e4bfc8c416d7/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_08:32:28_940d1a76b0f2ebf98b18326bffc7e4bfc8c416d7/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-18_08:32:28_940d1a76b0f2ebf98b18326bffc7e4bfc8c416d7/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index cfeb4154f349ba94af1355f47362aad2c24f0d1c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_08:32:28_940d1a76b0f2ebf98b18326bffc7e4bfc8c416d7/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-18_08:32:28_940d1a76b0f2ebf98b18326bffc7e4bfc8c416d7/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-18_08:32:28_940d1a76b0f2ebf98b18326bffc7e4bfc8c416d7/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-08-18_08:32:28_940d1a76b0f2ebf98b18326bffc7e4bfc8c416d7/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_08:32:28_940d1a76b0f2ebf98b18326bffc7e4bfc8c416d7/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-18_08:32:28_940d1a76b0f2ebf98b18326bffc7e4bfc8c416d7/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-18_08:32:28_940d1a76b0f2ebf98b18326bffc7e4bfc8c416d7/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 819b1876e26fdd758df23050ecd02bd8d430c90c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_08:32:28_940d1a76b0f2ebf98b18326bffc7e4bfc8c416d7/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_08:32:28_940d1a76b0f2ebf98b18326bffc7e4bfc8c416d7/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-18_08:32:28_940d1a76b0f2ebf98b18326bffc7e4bfc8c416d7/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index d30bf15f19916dd289c9e7ffc780360e3f8028fa..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_08:32:28_940d1a76b0f2ebf98b18326bffc7e4bfc8c416d7/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.202048,0.00378,1060.0 diff --git a/raw_results/2023-08-18_08:32:28_940d1a76b0f2ebf98b18326bffc7e4bfc8c416d7/pytorch_bert_inference/1/main.log b/raw_results/2023-08-18_08:32:28_940d1a76b0f2ebf98b18326bffc7e4bfc8c416d7/pytorch_bert_inference/1/main.log deleted file mode 100644 index 3d22970dfaabd844834806d26472499fd738cdc5..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-18_08:32:28_940d1a76b0f2ebf98b18326bffc7e4bfc8c416d7/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-18 08:51:41,895][benchmark][INFO] - Configuring inference benchmark -[2023-08-18 08:51:41,896][benchmark][INFO] - + Setting seed(42) -[2023-08-18 08:51:42,436][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-18 08:51:42,436][backend][INFO] - Configuring pytorch backend -[2023-08-18 08:51:42,436][backend][INFO] - + Checking initial device isolation -[2023-08-18 08:51:42,436][backend][INFO] - + Checking contineous device isolation -[2023-08-18 08:51:42,437][pytorch][INFO] - + Disabling gradients -[2023-08-18 08:51:42,437][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-18 08:51:42,551][pytorch][INFO] - + Turning on eval mode -[2023-08-18 08:51:42,552][inference][INFO] - Running inference benchmark -[2023-08-18 08:51:42,678][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-18 08:51:42,679][inference][INFO] - + Tracking forward pass peak memory -[2023-08-18 08:51:42,724][inference][INFO] - + Forward pass peak memory: 467.202048 (MB) -[2023-08-18 08:51:42,724][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-18 08:51:42,726][inference][INFO] - + Warming up the forward pass -[2023-08-18 08:51:42,779][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-18 08:51:47,822][inference][INFO] - + Forward pass latency: 3.78e-03 (s) -[2023-08-18 08:51:47,823][inference][INFO] - + Forward pass throughput: 1060.00 (samples/s) -[2023-08-18 08:51:47,823][inference][INFO] - Saving inference results -[2023-08-18 08:51:47,830][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-18_08:32:28_940d1a76b0f2ebf98b18326bffc7e4bfc8c416d7/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-18_08:32:28_940d1a76b0f2ebf98b18326bffc7e4bfc8c416d7/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 39258ef78f16622f3cbf970c22d0b92229e7cc16..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_08:32:28_940d1a76b0f2ebf98b18326bffc7e4bfc8c416d7/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: 
hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_08:32:28_940d1a76b0f2ebf98b18326bffc7e4bfc8c416d7/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-18_08:32:28_940d1a76b0f2ebf98b18326bffc7e4bfc8c416d7/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index fc9e3c3864179f6f9e813c371629d6e76086d7fe..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_08:32:28_940d1a76b0f2ebf98b18326bffc7e4bfc8c416d7/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
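
Note the difference between the pre-resolution config.yaml files, which store disable_grad and eval_mode as the interpolation ${is_inference:${benchmark.name}}, and the materialized hydra_config.yaml files, which store the resolved literal true. is_inference is a custom OmegaConf resolver registered by the benchmarking harness; its real definition is not shown in this diff, so the lambda below is an assumption, but it reproduces the resolved values seen in these runs:

    from omegaconf import OmegaConf

    # Assumed resolver body: true exactly when the benchmark is the inference one.
    OmegaConf.register_new_resolver("is_inference", lambda name: name == "inference")

    cfg = OmegaConf.create(
        {
            "benchmark": {"name": "inference"},
            "backend": {
                "disable_grad": "${is_inference:${benchmark.name}}",
                "eval_mode": "${is_inference:${benchmark.name}}",
            },
        }
    )
    print(cfg.backend.disable_grad, cfg.backend.eval_mode)  # True True
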
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-18_08:32:28_940d1a76b0f2ebf98b18326bffc7e4bfc8c416d7/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-18_08:32:28_940d1a76b0f2ebf98b18326bffc7e4bfc8c416d7/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-18_08:32:28_940d1a76b0f2ebf98b18326bffc7e4bfc8c416d7/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_08:32:28_940d1a76b0f2ebf98b18326bffc7e4bfc8c416d7/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-18_08:32:28_940d1a76b0f2ebf98b18326bffc7e4bfc8c416d7/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-18_08:32:28_940d1a76b0f2ebf98b18326bffc7e4bfc8c416d7/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index cb6bbe7e2787ba735853eaeafaa72ec86761d726..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_08:32:28_940d1a76b0f2ebf98b18326bffc7e4bfc8c416d7/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_08:32:28_940d1a76b0f2ebf98b18326bffc7e4bfc8c416d7/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-18_08:32:28_940d1a76b0f2ebf98b18326bffc7e4bfc8c416d7/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 93202200b986426927efaf79cf5d62d9241b1862..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_08:32:28_940d1a76b0f2ebf98b18326bffc7e4bfc8c416d7/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.250048,0.00378,265.0,0.511,196.0 diff --git a/raw_results/2023-08-18_08:32:28_940d1a76b0f2ebf98b18326bffc7e4bfc8c416d7/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-18_08:32:28_940d1a76b0f2ebf98b18326bffc7e4bfc8c416d7/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 1a41945bedcfeee718b90420f4e09800809cce4a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_08:32:28_940d1a76b0f2ebf98b18326bffc7e4bfc8c416d7/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-18 08:51:52,581][benchmark][INFO] - Configuring inference benchmark -[2023-08-18 08:51:52,582][benchmark][INFO] - + Setting seed(42) -[2023-08-18 08:51:53,985][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-18 08:51:53,985][backend][INFO] - Configuring pytorch backend -[2023-08-18 08:51:53,985][backend][INFO] - + Checking initial device isolation -[2023-08-18 08:51:53,985][backend][INFO] - + Checking contineous device isolation -[2023-08-18 08:51:53,986][pytorch][INFO] - + Disabling gradients -[2023-08-18 08:51:53,986][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-18 08:51:54,641][pytorch][INFO] - + Turning on eval mode -[2023-08-18 08:51:54,641][inference][INFO] - Running inference benchmark -[2023-08-18 08:51:54,838][inference][INFO] - + Tracking forward pass peak memory -[2023-08-18 08:51:54,881][inference][INFO] - + Forward pass peak memory: 469.250048 (MB) -[2023-08-18 08:51:54,882][inference][INFO] - + Warming up the forward pass 
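
The timestamps in these main.log files expose the measurement scheme: after warmup_runs: 10 untimed calls, forward passes are repeated for roughly benchmark_duration: 5 seconds (tracking starts at 08:51:54,913 below and the latency line lands about five seconds later), and a single latency figure is reported for the whole window. A hedged sketch of such a duration-bounded timing loop; the harness's actual tracker may differ in details such as the averaging:

    import time

    def measure_latency(forward, warmup_runs=10, benchmark_duration=5.0):
        # Untimed warmup calls, as in "Warming up the forward pass" in the logs.
        for _ in range(warmup_runs):
            forward()
        # Timed calls until the benchmark window is exhausted.
        latencies = []
        start = time.perf_counter()
        while time.perf_counter() - start < benchmark_duration:
            t0 = time.perf_counter()
            forward()
            latencies.append(time.perf_counter() - t0)
        return sum(latencies) / len(latencies)  # assumed: mean over the window

    # Dummy workload standing in for a model's forward pass:
    print(f"{measure_latency(lambda: sum(range(10_000))):.2e} s")
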
-[2023-08-18 08:51:54,913][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-18 08:51:59,958][inference][INFO] - + Forward pass latency: 3.78e-03 (s) -[2023-08-18 08:51:59,960][inference][INFO] - + Forward pass throughput: 265.00 (samples/s) -[2023-08-18 08:51:59,960][inference][INFO] - + Warming up the generation pass -[2023-08-18 08:52:00,545][inference][INFO] - + Tracking generation latency and throughput -[2023-08-18 08:52:05,659][inference][INFO] - + Generation pass latency: 5.11e-01 (s) -[2023-08-18 08:52:05,660][inference][INFO] - + Generation pass throughput: 196.00 (tokens/s) -[2023-08-18 08:52:05,660][inference][INFO] - Saving inference results -[2023-08-18 08:52:05,672][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-18_08:32:46_c45aab75356563dbb8124aafbc2699853e177873/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-18_08:32:46_c45aab75356563dbb8124aafbc2699853e177873/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index 8555d6744dd0e5881f14d8db6ba7f34fd0488b3b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_08:32:46_c45aab75356563dbb8124aafbc2699853e177873/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_08:32:46_c45aab75356563dbb8124aafbc2699853e177873/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-18_08:32:46_c45aab75356563dbb8124aafbc2699853e177873/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 0b725cc01b8c83eba705a0c8a329e6eacad113c6..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_08:32:46_c45aab75356563dbb8124aafbc2699853e177873/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-18_08:32:46_c45aab75356563dbb8124aafbc2699853e177873/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-18_08:32:46_c45aab75356563dbb8124aafbc2699853e177873/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-18_08:32:46_c45aab75356563dbb8124aafbc2699853e177873/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_08:32:46_c45aab75356563dbb8124aafbc2699853e177873/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-18_08:32:46_c45aab75356563dbb8124aafbc2699853e177873/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-18_08:32:46_c45aab75356563dbb8124aafbc2699853e177873/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 8748d22fe3c2d5758d4d82aa74516bd5a24d74c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_08:32:46_c45aab75356563dbb8124aafbc2699853e177873/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_08:32:46_c45aab75356563dbb8124aafbc2699853e177873/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-18_08:32:46_c45aab75356563dbb8124aafbc2699853e177873/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index be464cb3655fb6f269a4b7767c204f7584bd6c5f..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_08:32:46_c45aab75356563dbb8124aafbc2699853e177873/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.88255999999996,0.00305,328.0 diff --git a/raw_results/2023-08-18_08:32:46_c45aab75356563dbb8124aafbc2699853e177873/pytorch_bert_inference/0/main.log b/raw_results/2023-08-18_08:32:46_c45aab75356563dbb8124aafbc2699853e177873/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
750a5b8d3e6e946b8ed45ff39d864338e99fba67..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_08:32:46_c45aab75356563dbb8124aafbc2699853e177873/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-18 08:53:06,846][benchmark][INFO] - Configuring inference benchmark -[2023-08-18 08:53:06,847][benchmark][INFO] - + Setting seed(42) -[2023-08-18 08:53:08,077][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-18 08:53:08,078][backend][INFO] - Configuring pytorch backend -[2023-08-18 08:53:08,078][backend][INFO] - + Checking initial device isolation -[2023-08-18 08:53:08,078][backend][INFO] - + Checking contineous device isolation -[2023-08-18 08:53:08,078][pytorch][INFO] - + Disabling gradients -[2023-08-18 08:53:08,079][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-18 08:53:08,773][pytorch][INFO] - + Turning on eval mode -[2023-08-18 08:53:08,773][inference][INFO] - Running inference benchmark -[2023-08-18 08:53:08,891][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-18 08:53:08,892][inference][INFO] - + Tracking forward pass peak memory -[2023-08-18 08:53:08,949][inference][INFO] - + Forward pass peak memory: 466.88255999999996 (MB) -[2023-08-18 08:53:08,951][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-18 08:53:08,952][inference][INFO] - + Warming up the forward pass -[2023-08-18 08:53:08,990][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-18 08:53:14,041][inference][INFO] - + Forward pass latency: 3.05e-03 (s) -[2023-08-18 08:53:14,043][inference][INFO] - + Forward pass throughput: 328.00 (samples/s) -[2023-08-18 08:53:14,043][inference][INFO] - Saving inference results -[2023-08-18 08:53:14,054][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-18_08:32:46_c45aab75356563dbb8124aafbc2699853e177873/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-18_08:32:46_c45aab75356563dbb8124aafbc2699853e177873/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index fc02352dc97d220d55fe04bc4edef4200110f22a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_08:32:46_c45aab75356563dbb8124aafbc2699853e177873/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_08:32:46_c45aab75356563dbb8124aafbc2699853e177873/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-18_08:32:46_c45aab75356563dbb8124aafbc2699853e177873/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index f1d61b5532a6da3f3042d37e3e67ed26eeeb68a2..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_08:32:46_c45aab75356563dbb8124aafbc2699853e177873/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-18_08:32:46_c45aab75356563dbb8124aafbc2699853e177873/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-18_08:32:46_c45aab75356563dbb8124aafbc2699853e177873/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-18_08:32:46_c45aab75356563dbb8124aafbc2699853e177873/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_08:32:46_c45aab75356563dbb8124aafbc2699853e177873/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-18_08:32:46_c45aab75356563dbb8124aafbc2699853e177873/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-18_08:32:46_c45aab75356563dbb8124aafbc2699853e177873/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 819b1876e26fdd758df23050ecd02bd8d430c90c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_08:32:46_c45aab75356563dbb8124aafbc2699853e177873/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_08:32:46_c45aab75356563dbb8124aafbc2699853e177873/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-18_08:32:46_c45aab75356563dbb8124aafbc2699853e177873/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 186e330bc1b693cfd32e16b075c661e3bafd65d3..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_08:32:46_c45aab75356563dbb8124aafbc2699853e177873/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.00896,0.00338,1180.0 diff --git a/raw_results/2023-08-18_08:32:46_c45aab75356563dbb8124aafbc2699853e177873/pytorch_bert_inference/1/main.log b/raw_results/2023-08-18_08:32:46_c45aab75356563dbb8124aafbc2699853e177873/pytorch_bert_inference/1/main.log deleted file mode 100644 index 06e4bee78a9182754a243f58a31224b62bdcd1ee..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_08:32:46_c45aab75356563dbb8124aafbc2699853e177873/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-18 08:53:14,452][benchmark][INFO] - Configuring inference benchmark -[2023-08-18 08:53:14,454][benchmark][INFO] - + Setting seed(42) -[2023-08-18 08:53:14,900][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-18 08:53:14,901][backend][INFO] - Configuring pytorch backend -[2023-08-18 08:53:14,901][backend][INFO] - + Checking initial device isolation -[2023-08-18 08:53:14,901][backend][INFO] - + Checking contineous device isolation -[2023-08-18 08:53:14,901][pytorch][INFO] - + Disabling gradients -[2023-08-18 08:53:14,901][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-18 08:53:15,010][pytorch][INFO] - + Turning on eval mode -[2023-08-18 08:53:15,011][inference][INFO] - Running inference benchmark -[2023-08-18 08:53:15,134][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-18 08:53:15,135][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-18 08:53:15,177][inference][INFO] - + Forward pass peak memory: 468.00896 (MB) -[2023-08-18 08:53:15,178][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-18 08:53:15,179][inference][INFO] - + Warming up the forward pass -[2023-08-18 08:53:15,215][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-18 08:53:20,263][inference][INFO] - + Forward pass latency: 3.38e-03 (s) -[2023-08-18 08:53:20,264][inference][INFO] - + Forward pass throughput: 1180.00 (samples/s) -[2023-08-18 08:53:20,264][inference][INFO] - Saving inference results -[2023-08-18 08:53:20,272][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-18_08:32:46_c45aab75356563dbb8124aafbc2699853e177873/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-18_08:32:46_c45aab75356563dbb8124aafbc2699853e177873/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 39258ef78f16622f3cbf970c22d0b92229e7cc16..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_08:32:46_c45aab75356563dbb8124aafbc2699853e177873/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_08:32:46_c45aab75356563dbb8124aafbc2699853e177873/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-18_08:32:46_c45aab75356563dbb8124aafbc2699853e177873/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index ce1cf41a93477059ba803355a7bc07340c3772aa..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_08:32:46_c45aab75356563dbb8124aafbc2699853e177873/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-18_08:32:46_c45aab75356563dbb8124aafbc2699853e177873/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-18_08:32:46_c45aab75356563dbb8124aafbc2699853e177873/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-18_08:32:46_c45aab75356563dbb8124aafbc2699853e177873/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_08:32:46_c45aab75356563dbb8124aafbc2699853e177873/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-18_08:32:46_c45aab75356563dbb8124aafbc2699853e177873/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-18_08:32:46_c45aab75356563dbb8124aafbc2699853e177873/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index cb6bbe7e2787ba735853eaeafaa72ec86761d726..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_08:32:46_c45aab75356563dbb8124aafbc2699853e177873/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_08:32:46_c45aab75356563dbb8124aafbc2699853e177873/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-18_08:32:46_c45aab75356563dbb8124aafbc2699853e177873/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 6d994f62f47841a165fc6fda685a680149c25742..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_08:32:46_c45aab75356563dbb8124aafbc2699853e177873/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.004288,0.00373,268.0,0.519,193.0 diff --git a/raw_results/2023-08-18_08:32:46_c45aab75356563dbb8124aafbc2699853e177873/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-18_08:32:46_c45aab75356563dbb8124aafbc2699853e177873/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index a8750fed0bb06832b273c49622a5c29ad4a4c4d7..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-18_08:32:46_c45aab75356563dbb8124aafbc2699853e177873/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-18 08:53:25,084][benchmark][INFO] - Configuring inference benchmark -[2023-08-18 08:53:25,084][benchmark][INFO] - + Setting seed(42) -[2023-08-18 08:53:26,510][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-18 08:53:26,511][backend][INFO] - Configuring pytorch backend -[2023-08-18 08:53:26,511][backend][INFO] - + Checking initial device isolation -[2023-08-18 08:53:26,511][backend][INFO] - + Checking contineous device isolation -[2023-08-18 08:53:26,512][pytorch][INFO] - + Disabling gradients -[2023-08-18 08:53:26,512][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-18 08:53:27,145][pytorch][INFO] - + Turning on eval mode -[2023-08-18 08:53:27,145][inference][INFO] - Running inference benchmark -[2023-08-18 08:53:27,336][inference][INFO] - + Tracking forward pass peak memory -[2023-08-18 08:53:27,385][inference][INFO] - + Forward pass peak memory: 469.004288 (MB) -[2023-08-18 08:53:27,386][inference][INFO] - + Warming up the forward pass -[2023-08-18 08:53:27,418][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-18 08:53:32,465][inference][INFO] - + Forward pass latency: 3.73e-03 (s) -[2023-08-18 08:53:32,467][inference][INFO] - + Forward pass throughput: 268.00 (samples/s) -[2023-08-18 08:53:32,467][inference][INFO] - + Warming up the generation pass -[2023-08-18 08:53:33,049][inference][INFO] - + Tracking generation latency and throughput -[2023-08-18 08:53:38,237][inference][INFO] - + Generation pass latency: 5.19e-01 (s) -[2023-08-18 08:53:38,238][inference][INFO] - + Generation pass throughput: 193.00 (tokens/s) -[2023-08-18 08:53:38,238][inference][INFO] - Saving inference results -[2023-08-18 08:53:38,253][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-18_10:40:40_9d7afd2536ecd9816dd2ea9592a01e52fec17d17/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-18_10:40:40_9d7afd2536ecd9816dd2ea9592a01e52fec17d17/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index 8555d6744dd0e5881f14d8db6ba7f34fd0488b3b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_10:40:40_9d7afd2536ecd9816dd2ea9592a01e52fec17d17/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_10:40:40_9d7afd2536ecd9816dd2ea9592a01e52fec17d17/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-18_10:40:40_9d7afd2536ecd9816dd2ea9592a01e52fec17d17/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 721959e32684ff553aaee272fcd680a474a8bf0e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_10:40:40_9d7afd2536ecd9816dd2ea9592a01e52fec17d17/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-18_10:40:40_9d7afd2536ecd9816dd2ea9592a01e52fec17d17/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-18_10:40:40_9d7afd2536ecd9816dd2ea9592a01e52fec17d17/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-18_10:40:40_9d7afd2536ecd9816dd2ea9592a01e52fec17d17/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_10:40:40_9d7afd2536ecd9816dd2ea9592a01e52fec17d17/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-18_10:40:40_9d7afd2536ecd9816dd2ea9592a01e52fec17d17/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-18_10:40:40_9d7afd2536ecd9816dd2ea9592a01e52fec17d17/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 8748d22fe3c2d5758d4d82aa74516bd5a24d74c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_10:40:40_9d7afd2536ecd9816dd2ea9592a01e52fec17d17/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_10:40:40_9d7afd2536ecd9816dd2ea9592a01e52fec17d17/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-18_10:40:40_9d7afd2536ecd9816dd2ea9592a01e52fec17d17/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 0fa09d6348fd5012b4b5731dd1c3effc3a928640..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_10:40:40_9d7afd2536ecd9816dd2ea9592a01e52fec17d17/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.804736,0.00379,264.0 diff --git a/raw_results/2023-08-18_10:40:40_9d7afd2536ecd9816dd2ea9592a01e52fec17d17/pytorch_bert_inference/0/main.log b/raw_results/2023-08-18_10:40:40_9d7afd2536ecd9816dd2ea9592a01e52fec17d17/pytorch_bert_inference/0/main.log deleted file mode 100644 index 6aa03cef97383a8b07e250e7fbf9bbdf207ab611..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_10:40:40_9d7afd2536ecd9816dd2ea9592a01e52fec17d17/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-18 10:50:00,380][benchmark][INFO] - Configuring inference benchmark -[2023-08-18 10:50:00,380][benchmark][INFO] - + Setting seed(42) -[2023-08-18 10:50:01,579][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-18 10:50:01,580][backend][INFO] - Configuring pytorch backend -[2023-08-18 10:50:01,580][backend][INFO] - + Checking initial device isolation -[2023-08-18 10:50:01,580][backend][INFO] - + Checking contineous device isolation -[2023-08-18 10:50:01,580][pytorch][INFO] - + Disabling gradients -[2023-08-18 10:50:01,580][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-18 10:50:02,188][pytorch][INFO] - + Turning on eval mode -[2023-08-18 10:50:02,189][inference][INFO] - Running inference benchmark -[2023-08-18 10:50:02,311][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-18 10:50:02,313][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-18 10:50:02,369][inference][INFO] - + Forward pass peak memory: 466.804736 (MB) -[2023-08-18 10:50:02,370][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-18 10:50:02,372][inference][INFO] - + Warming up the forward pass -[2023-08-18 10:50:02,405][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-18 10:50:07,453][inference][INFO] - + Forward pass latency: 3.79e-03 (s) -[2023-08-18 10:50:07,455][inference][INFO] - + Forward pass throughput: 264.00 (samples/s) -[2023-08-18 10:50:07,455][inference][INFO] - Saving inference results -[2023-08-18 10:50:07,468][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-18_10:40:40_9d7afd2536ecd9816dd2ea9592a01e52fec17d17/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-18_10:40:40_9d7afd2536ecd9816dd2ea9592a01e52fec17d17/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index fc02352dc97d220d55fe04bc4edef4200110f22a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_10:40:40_9d7afd2536ecd9816dd2ea9592a01e52fec17d17/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_10:40:40_9d7afd2536ecd9816dd2ea9592a01e52fec17d17/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-18_10:40:40_9d7afd2536ecd9816dd2ea9592a01e52fec17d17/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 745475ee766ce7feec1504cfb776a7339caab4ef..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_10:40:40_9d7afd2536ecd9816dd2ea9592a01e52fec17d17/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-18_10:40:40_9d7afd2536ecd9816dd2ea9592a01e52fec17d17/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-18_10:40:40_9d7afd2536ecd9816dd2ea9592a01e52fec17d17/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-08-18_10:40:40_9d7afd2536ecd9816dd2ea9592a01e52fec17d17/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_10:40:40_9d7afd2536ecd9816dd2ea9592a01e52fec17d17/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-18_10:40:40_9d7afd2536ecd9816dd2ea9592a01e52fec17d17/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-18_10:40:40_9d7afd2536ecd9816dd2ea9592a01e52fec17d17/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 819b1876e26fdd758df23050ecd02bd8d430c90c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_10:40:40_9d7afd2536ecd9816dd2ea9592a01e52fec17d17/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_10:40:40_9d7afd2536ecd9816dd2ea9592a01e52fec17d17/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-18_10:40:40_9d7afd2536ecd9816dd2ea9592a01e52fec17d17/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 79aaf6b6d754f4910162a2c9dfe23ea3bba9fef8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_10:40:40_9d7afd2536ecd9816dd2ea9592a01e52fec17d17/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.828736,0.00427,937.0 diff --git a/raw_results/2023-08-18_10:40:40_9d7afd2536ecd9816dd2ea9592a01e52fec17d17/pytorch_bert_inference/1/main.log b/raw_results/2023-08-18_10:40:40_9d7afd2536ecd9816dd2ea9592a01e52fec17d17/pytorch_bert_inference/1/main.log deleted file mode 100644 index 2ae57e72604487e552381002adb4a74735bf483e..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-18_10:40:40_9d7afd2536ecd9816dd2ea9592a01e52fec17d17/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-18 10:50:07,854][benchmark][INFO] - Configuring inference benchmark -[2023-08-18 10:50:07,855][benchmark][INFO] - + Setting seed(42) -[2023-08-18 10:50:08,293][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-18 10:50:08,293][backend][INFO] - Configuring pytorch backend -[2023-08-18 10:50:08,293][backend][INFO] - + Checking initial device isolation -[2023-08-18 10:50:08,294][backend][INFO] - + Checking contineous device isolation -[2023-08-18 10:50:08,294][pytorch][INFO] - + Disabling gradients -[2023-08-18 10:50:08,294][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-18 10:50:08,412][pytorch][INFO] - + Turning on eval mode -[2023-08-18 10:50:08,413][inference][INFO] - Running inference benchmark -[2023-08-18 10:50:08,540][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-18 10:50:08,541][inference][INFO] - + Tracking forward pass peak memory -[2023-08-18 10:50:08,584][inference][INFO] - + Forward pass peak memory: 467.828736 (MB) -[2023-08-18 10:50:08,585][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-18 10:50:08,587][inference][INFO] - + Warming up the forward pass -[2023-08-18 10:50:08,631][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-18 10:50:13,670][inference][INFO] - + Forward pass latency: 4.27e-03 (s) -[2023-08-18 10:50:13,671][inference][INFO] - + Forward pass throughput: 937.00 (samples/s) -[2023-08-18 10:50:13,671][inference][INFO] - Saving inference results -[2023-08-18 10:50:13,679][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-18_10:40:40_9d7afd2536ecd9816dd2ea9592a01e52fec17d17/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-18_10:40:40_9d7afd2536ecd9816dd2ea9592a01e52fec17d17/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 39258ef78f16622f3cbf970c22d0b92229e7cc16..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_10:40:40_9d7afd2536ecd9816dd2ea9592a01e52fec17d17/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: 
hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_10:40:40_9d7afd2536ecd9816dd2ea9592a01e52fec17d17/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-18_10:40:40_9d7afd2536ecd9816dd2ea9592a01e52fec17d17/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 01e1df919a3e34241e82363044a23304e62eaff2..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_10:40:40_9d7afd2536ecd9816dd2ea9592a01e52fec17d17/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-18_10:40:40_9d7afd2536ecd9816dd2ea9592a01e52fec17d17/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-18_10:40:40_9d7afd2536ecd9816dd2ea9592a01e52fec17d17/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-18_10:40:40_9d7afd2536ecd9816dd2ea9592a01e52fec17d17/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_10:40:40_9d7afd2536ecd9816dd2ea9592a01e52fec17d17/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-18_10:40:40_9d7afd2536ecd9816dd2ea9592a01e52fec17d17/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-18_10:40:40_9d7afd2536ecd9816dd2ea9592a01e52fec17d17/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index cb6bbe7e2787ba735853eaeafaa72ec86761d726..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_10:40:40_9d7afd2536ecd9816dd2ea9592a01e52fec17d17/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_10:40:40_9d7afd2536ecd9816dd2ea9592a01e52fec17d17/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-18_10:40:40_9d7afd2536ecd9816dd2ea9592a01e52fec17d17/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 97dc05ff3528dbd2038fdadea47e1559b207c45b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_10:40:40_9d7afd2536ecd9816dd2ea9592a01e52fec17d17/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.250048,0.00371,270.0,0.504,198.0 diff --git a/raw_results/2023-08-18_10:40:40_9d7afd2536ecd9816dd2ea9592a01e52fec17d17/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-18_10:40:40_9d7afd2536ecd9816dd2ea9592a01e52fec17d17/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index a139110e21a86b460ec2e50b0f4fad028a66fa9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_10:40:40_9d7afd2536ecd9816dd2ea9592a01e52fec17d17/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-18 10:50:18,378][benchmark][INFO] - Configuring inference benchmark -[2023-08-18 10:50:18,378][benchmark][INFO] - + Setting seed(42) -[2023-08-18 10:50:19,801][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-18 10:50:19,801][backend][INFO] - Configuring pytorch backend -[2023-08-18 10:50:19,801][backend][INFO] - + Checking initial device isolation -[2023-08-18 10:50:19,802][backend][INFO] - + Checking contineous device isolation -[2023-08-18 10:50:19,802][pytorch][INFO] - + Disabling gradients -[2023-08-18 10:50:19,802][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-18 10:50:20,437][pytorch][INFO] - + Turning on eval mode -[2023-08-18 10:50:20,438][inference][INFO] - Running inference benchmark -[2023-08-18 10:50:20,782][inference][INFO] - + Tracking forward pass peak memory -[2023-08-18 10:50:20,833][inference][INFO] - + Forward pass peak memory: 469.250048 (MB) -[2023-08-18 10:50:20,834][inference][INFO] - + Warming up the forward pass 
-[2023-08-18 10:50:20,867][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-18 10:50:25,912][inference][INFO] - + Forward pass latency: 3.71e-03 (s) -[2023-08-18 10:50:25,914][inference][INFO] - + Forward pass throughput: 270.00 (samples/s) -[2023-08-18 10:50:25,914][inference][INFO] - + Warming up the generation pass -[2023-08-18 10:50:26,499][inference][INFO] - + Tracking generation latency and throughput -[2023-08-18 10:50:32,048][inference][INFO] - + Generation pass latency: 5.04e-01 (s) -[2023-08-18 10:50:32,049][inference][INFO] - + Generation pass throughput: 198.00 (tokens/s) -[2023-08-18 10:50:32,049][inference][INFO] - Saving inference results -[2023-08-18 10:50:32,062][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-18_11:26:27_30b3c46ff5a7c9761a800a9ab4bcf8cdb206727e/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-18_11:26:27_30b3c46ff5a7c9761a800a9ab4bcf8cdb206727e/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index 8555d6744dd0e5881f14d8db6ba7f34fd0488b3b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_11:26:27_30b3c46ff5a7c9761a800a9ab4bcf8cdb206727e/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_11:26:27_30b3c46ff5a7c9761a800a9ab4bcf8cdb206727e/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-18_11:26:27_30b3c46ff5a7c9761a800a9ab4bcf8cdb206727e/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 5d8258b76dade458537d303fa5490da6413a2c71..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_11:26:27_30b3c46ff5a7c9761a800a9ab4bcf8cdb206727e/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-18_11:26:27_30b3c46ff5a7c9761a800a9ab4bcf8cdb206727e/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-18_11:26:27_30b3c46ff5a7c9761a800a9ab4bcf8cdb206727e/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-18_11:26:27_30b3c46ff5a7c9761a800a9ab4bcf8cdb206727e/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_11:26:27_30b3c46ff5a7c9761a800a9ab4bcf8cdb206727e/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-18_11:26:27_30b3c46ff5a7c9761a800a9ab4bcf8cdb206727e/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-18_11:26:27_30b3c46ff5a7c9761a800a9ab4bcf8cdb206727e/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 8748d22fe3c2d5758d4d82aa74516bd5a24d74c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_11:26:27_30b3c46ff5a7c9761a800a9ab4bcf8cdb206727e/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_11:26:27_30b3c46ff5a7c9761a800a9ab4bcf8cdb206727e/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-18_11:26:27_30b3c46ff5a7c9761a800a9ab4bcf8cdb206727e/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 7730c7bc8c5ef795acfb6c6f6e57b09aca44ee24..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_11:26:27_30b3c46ff5a7c9761a800a9ab4bcf8cdb206727e/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.324928,0.0039,256.0 diff --git a/raw_results/2023-08-18_11:26:27_30b3c46ff5a7c9761a800a9ab4bcf8cdb206727e/pytorch_bert_inference/0/main.log b/raw_results/2023-08-18_11:26:27_30b3c46ff5a7c9761a800a9ab4bcf8cdb206727e/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
b393a29a9c3fbeb41131d260953284a4e92a2b40..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_11:26:27_30b3c46ff5a7c9761a800a9ab4bcf8cdb206727e/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-18 12:57:03,594][benchmark][INFO] - Configuring inference benchmark -[2023-08-18 12:57:03,595][benchmark][INFO] - + Setting seed(42) -[2023-08-18 12:57:04,980][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-18 12:57:04,981][backend][INFO] - Configuring pytorch backend -[2023-08-18 12:57:04,981][backend][INFO] - + Checking initial device isolation -[2023-08-18 12:57:04,981][backend][INFO] - + Checking contineous device isolation -[2023-08-18 12:57:04,981][pytorch][INFO] - + Disabling gradients -[2023-08-18 12:57:04,981][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-18 12:57:05,605][pytorch][INFO] - + Turning on eval mode -[2023-08-18 12:57:05,605][inference][INFO] - Running inference benchmark -[2023-08-18 12:57:05,721][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-18 12:57:05,723][inference][INFO] - + Tracking forward pass peak memory -[2023-08-18 12:57:05,780][inference][INFO] - + Forward pass peak memory: 467.324928 (MB) -[2023-08-18 12:57:05,781][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-18 12:57:05,783][inference][INFO] - + Warming up the forward pass -[2023-08-18 12:57:05,824][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-18 12:57:10,870][inference][INFO] - + Forward pass latency: 3.90e-03 (s) -[2023-08-18 12:57:10,872][inference][INFO] - + Forward pass throughput: 256.00 (samples/s) -[2023-08-18 12:57:10,872][inference][INFO] - Saving inference results -[2023-08-18 12:57:10,882][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-18_11:26:27_30b3c46ff5a7c9761a800a9ab4bcf8cdb206727e/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-18_11:26:27_30b3c46ff5a7c9761a800a9ab4bcf8cdb206727e/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index fc02352dc97d220d55fe04bc4edef4200110f22a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_11:26:27_30b3c46ff5a7c9761a800a9ab4bcf8cdb206727e/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_11:26:27_30b3c46ff5a7c9761a800a9ab4bcf8cdb206727e/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-18_11:26:27_30b3c46ff5a7c9761a800a9ab4bcf8cdb206727e/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index dd04f99c4ac181d66954b3d4f2bf79cd989f97f7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_11:26:27_30b3c46ff5a7c9761a800a9ab4bcf8cdb206727e/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
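
The sweeper block above (params: benchmark.input_shapes.batch_size: 1,4) is what produces the numbered job directories 0 and 1 seen throughout these raw_results paths, and the matching overrides.yaml files. As a rough illustration — a simplified sketch of what Hydra's BasicSweeper does with comma-separated values, not its actual implementation — each swept key expands into one numbered job per combination:

    from itertools import product

    # Simplified sketch: every swept key contributes a list of values, and the
    # cartesian product yields one job per combination, numbered from 0.
    sweep = {"benchmark.input_shapes.batch_size": ["1", "4"]}

    keys = list(sweep)
    for job_num, values in enumerate(product(*sweep.values())):
        overrides = [f"{k}={v}" for k, v in zip(keys, values)]
        print(job_num, overrides)
    # 0 ['benchmark.input_shapes.batch_size=1']
    # 1 ['benchmark.input_shapes.batch_size=4']

These generated override strings are exactly what ends up in each job's .config/overrides.yaml below.
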
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-18_11:26:27_30b3c46ff5a7c9761a800a9ab4bcf8cdb206727e/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-18_11:26:27_30b3c46ff5a7c9761a800a9ab4bcf8cdb206727e/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-18_11:26:27_30b3c46ff5a7c9761a800a9ab4bcf8cdb206727e/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_11:26:27_30b3c46ff5a7c9761a800a9ab4bcf8cdb206727e/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-18_11:26:27_30b3c46ff5a7c9761a800a9ab4bcf8cdb206727e/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-18_11:26:27_30b3c46ff5a7c9761a800a9ab4bcf8cdb206727e/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 819b1876e26fdd758df23050ecd02bd8d430c90c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_11:26:27_30b3c46ff5a7c9761a800a9ab4bcf8cdb206727e/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_11:26:27_30b3c46ff5a7c9761a800a9ab4bcf8cdb206727e/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-18_11:26:27_30b3c46ff5a7c9761a800a9ab4bcf8cdb206727e/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 87d3646989cb9208a2b9a39749d49b3693edf9cc..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_11:26:27_30b3c46ff5a7c9761a800a9ab4bcf8cdb206727e/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.279296,0.00339,1180.0 diff --git a/raw_results/2023-08-18_11:26:27_30b3c46ff5a7c9761a800a9ab4bcf8cdb206727e/pytorch_bert_inference/1/main.log b/raw_results/2023-08-18_11:26:27_30b3c46ff5a7c9761a800a9ab4bcf8cdb206727e/pytorch_bert_inference/1/main.log deleted file mode 100644 index 0d1c2139523f42cd2789351a80b827ff49be18ee..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_11:26:27_30b3c46ff5a7c9761a800a9ab4bcf8cdb206727e/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-18 12:57:11,263][benchmark][INFO] - Configuring inference benchmark -[2023-08-18 12:57:11,264][benchmark][INFO] - + Setting seed(42) -[2023-08-18 12:57:11,702][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-18 12:57:11,702][backend][INFO] - Configuring pytorch backend -[2023-08-18 12:57:11,703][backend][INFO] - + Checking initial device isolation -[2023-08-18 12:57:11,703][backend][INFO] - + Checking contineous device isolation -[2023-08-18 12:57:11,703][pytorch][INFO] - + Disabling gradients -[2023-08-18 12:57:11,703][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-18 12:57:11,820][pytorch][INFO] - + Turning on eval mode -[2023-08-18 12:57:11,821][inference][INFO] - Running inference benchmark -[2023-08-18 12:57:11,944][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-18 12:57:11,945][inference][INFO] - + Tracking forward pass 
peak memory -[2023-08-18 12:57:11,988][inference][INFO] - + Forward pass peak memory: 468.279296 (MB) -[2023-08-18 12:57:11,989][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-18 12:57:11,991][inference][INFO] - + Warming up the forward pass -[2023-08-18 12:57:12,026][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-18 12:57:17,072][inference][INFO] - + Forward pass latency: 3.39e-03 (s) -[2023-08-18 12:57:17,073][inference][INFO] - + Forward pass throughput: 1180.00 (samples/s) -[2023-08-18 12:57:17,073][inference][INFO] - Saving inference results -[2023-08-18 12:57:17,082][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-18_11:26:27_30b3c46ff5a7c9761a800a9ab4bcf8cdb206727e/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-18_11:26:27_30b3c46ff5a7c9761a800a9ab4bcf8cdb206727e/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 39258ef78f16622f3cbf970c22d0b92229e7cc16..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_11:26:27_30b3c46ff5a7c9761a800a9ab4bcf8cdb206727e/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_11:26:27_30b3c46ff5a7c9761a800a9ab4bcf8cdb206727e/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-18_11:26:27_30b3c46ff5a7c9761a800a9ab4bcf8cdb206727e/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 3959b818e46c135a7d90b971bcb77f26bb3db411..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_11:26:27_30b3c46ff5a7c9761a800a9ab4bcf8cdb206727e/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-18_11:26:27_30b3c46ff5a7c9761a800a9ab4bcf8cdb206727e/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-18_11:26:27_30b3c46ff5a7c9761a800a9ab4bcf8cdb206727e/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-18_11:26:27_30b3c46ff5a7c9761a800a9ab4bcf8cdb206727e/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_11:26:27_30b3c46ff5a7c9761a800a9ab4bcf8cdb206727e/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-18_11:26:27_30b3c46ff5a7c9761a800a9ab4bcf8cdb206727e/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-18_11:26:27_30b3c46ff5a7c9761a800a9ab4bcf8cdb206727e/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index cb6bbe7e2787ba735853eaeafaa72ec86761d726..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_11:26:27_30b3c46ff5a7c9761a800a9ab4bcf8cdb206727e/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_11:26:27_30b3c46ff5a7c9761a800a9ab4bcf8cdb206727e/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-18_11:26:27_30b3c46ff5a7c9761a800a9ab4bcf8cdb206727e/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 99ff0b5c184af8887ad5c8887d6d559dc562844c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_11:26:27_30b3c46ff5a7c9761a800a9ab4bcf8cdb206727e/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.05753599999997,0.00335,299.0,0.482,207.0 diff --git a/raw_results/2023-08-18_11:26:27_30b3c46ff5a7c9761a800a9ab4bcf8cdb206727e/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-18_11:26:27_30b3c46ff5a7c9761a800a9ab4bcf8cdb206727e/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index f3fa5a7966d4b2fa2f5398823fbfdc65d9c4dae1..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-18_11:26:27_30b3c46ff5a7c9761a800a9ab4bcf8cdb206727e/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-18 12:57:21,847][benchmark][INFO] - Configuring inference benchmark -[2023-08-18 12:57:21,848][benchmark][INFO] - + Setting seed(42) -[2023-08-18 12:57:23,277][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-18 12:57:23,277][backend][INFO] - Configuring pytorch backend -[2023-08-18 12:57:23,278][backend][INFO] - + Checking initial device isolation -[2023-08-18 12:57:23,278][backend][INFO] - + Checking contineous device isolation -[2023-08-18 12:57:23,278][pytorch][INFO] - + Disabling gradients -[2023-08-18 12:57:23,278][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-18 12:57:23,925][pytorch][INFO] - + Turning on eval mode -[2023-08-18 12:57:23,925][inference][INFO] - Running inference benchmark -[2023-08-18 12:57:24,124][inference][INFO] - + Tracking forward pass peak memory -[2023-08-18 12:57:24,182][inference][INFO] - + Forward pass peak memory: 469.05753599999997 (MB) -[2023-08-18 12:57:24,183][inference][INFO] - + Warming up the forward pass -[2023-08-18 12:57:24,217][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-18 12:57:29,265][inference][INFO] - + Forward pass latency: 3.35e-03 (s) -[2023-08-18 12:57:29,267][inference][INFO] - + Forward pass throughput: 299.00 (samples/s) -[2023-08-18 12:57:29,268][inference][INFO] - + Warming up the generation pass -[2023-08-18 12:57:29,760][inference][INFO] - + Tracking generation latency and throughput -[2023-08-18 12:57:35,063][inference][INFO] - + Generation pass latency: 4.82e-01 (s) -[2023-08-18 12:57:35,064][inference][INFO] - + Generation pass throughput: 207.00 (tokens/s) -[2023-08-18 12:57:35,065][inference][INFO] - Saving inference results -[2023-08-18 12:57:35,077][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-18_11:39:23_bc3e20dcf08a03e22a0e4a42a0ce5a8ec94180e5/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-18_11:39:23_bc3e20dcf08a03e22a0e4a42a0ce5a8ec94180e5/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index 8555d6744dd0e5881f14d8db6ba7f34fd0488b3b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_11:39:23_bc3e20dcf08a03e22a0e4a42a0ce5a8ec94180e5/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_11:39:23_bc3e20dcf08a03e22a0e4a42a0ce5a8ec94180e5/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-18_11:39:23_bc3e20dcf08a03e22a0e4a42a0ce5a8ec94180e5/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 5a9d4c8883494382b4f32f798f3780a66d0449e3..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_11:39:23_bc3e20dcf08a03e22a0e4a42a0ce5a8ec94180e5/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
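
The run/sweep directory templates above explain the layout of every path deleted in this diff: the commit date and SHA are read from environment variables, and each sweep job lands in its ${hydra.job.num} subdirectory. A minimal sketch of how the template resolves — assuming COMMIT_DATE_GMT and COMMIT_SHA are exported by the CI driver, as the recorded paths suggest; the real interpolation is performed by OmegaConf/Hydra, not by this snippet:

    import os

    # Hypothetical illustration of the sweep-dir template resolution.
    os.environ.setdefault("COMMIT_DATE_GMT", "2023-08-18_11:39:23")
    os.environ.setdefault("COMMIT_SHA", "bc3e20dcf08a03e22a0e4a42a0ce5a8ec94180e5")

    experiment_name = "pytorch_bert_inference"
    job_num = 0
    sweep_dir = (
        f"sweeps/{os.environ['COMMIT_DATE_GMT']}_{os.environ['COMMIT_SHA']}"
        f"/{experiment_name}/{job_num}"
    )
    print(sweep_dir)
    # sweeps/2023-08-18_11:39:23_bc3e20dcf08a03e22a0e4a42a0ce5a8ec94180e5/pytorch_bert_inference/0

This matches the output_dir values recorded in the hydra.yaml files for these runs.
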
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-18_11:39:23_bc3e20dcf08a03e22a0e4a42a0ce5a8ec94180e5/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-18_11:39:23_bc3e20dcf08a03e22a0e4a42a0ce5a8ec94180e5/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-18_11:39:23_bc3e20dcf08a03e22a0e4a42a0ce5a8ec94180e5/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_11:39:23_bc3e20dcf08a03e22a0e4a42a0ce5a8ec94180e5/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-18_11:39:23_bc3e20dcf08a03e22a0e4a42a0ce5a8ec94180e5/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-18_11:39:23_bc3e20dcf08a03e22a0e4a42a0ce5a8ec94180e5/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 8748d22fe3c2d5758d4d82aa74516bd5a24d74c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_11:39:23_bc3e20dcf08a03e22a0e4a42a0ce5a8ec94180e5/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_11:39:23_bc3e20dcf08a03e22a0e4a42a0ce5a8ec94180e5/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-18_11:39:23_bc3e20dcf08a03e22a0e4a42a0ce5a8ec94180e5/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 8ea724560f5150d3aca487ed3f2932dcd5ffde54..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_11:39:23_bc3e20dcf08a03e22a0e4a42a0ce5a8ec94180e5/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.603456,0.00364,275.0 diff --git a/raw_results/2023-08-18_11:39:23_bc3e20dcf08a03e22a0e4a42a0ce5a8ec94180e5/pytorch_bert_inference/0/main.log b/raw_results/2023-08-18_11:39:23_bc3e20dcf08a03e22a0e4a42a0ce5a8ec94180e5/pytorch_bert_inference/0/main.log deleted file mode 100644 index 619f77df99d35f4b939dd8c90f2a3bece9228a79..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_11:39:23_bc3e20dcf08a03e22a0e4a42a0ce5a8ec94180e5/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-18 12:58:39,782][benchmark][INFO] - Configuring inference benchmark -[2023-08-18 12:58:39,783][benchmark][INFO] - + Setting seed(42) -[2023-08-18 12:58:41,094][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-18 12:58:41,095][backend][INFO] - Configuring pytorch backend -[2023-08-18 12:58:41,095][backend][INFO] - + Checking initial device isolation -[2023-08-18 12:58:41,095][backend][INFO] - + Checking contineous device isolation -[2023-08-18 12:58:41,095][pytorch][INFO] - + Disabling gradients -[2023-08-18 12:58:41,095][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-18 12:58:41,693][pytorch][INFO] - + Turning on eval mode -[2023-08-18 12:58:41,693][inference][INFO] - Running inference benchmark -[2023-08-18 12:58:41,811][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-18 12:58:41,813][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-18 12:58:41,871][inference][INFO] - + Forward pass peak memory: 467.603456 (MB) -[2023-08-18 12:58:41,872][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-18 12:58:41,873][inference][INFO] - + Warming up the forward pass -[2023-08-18 12:58:41,905][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-18 12:58:46,953][inference][INFO] - + Forward pass latency: 3.64e-03 (s) -[2023-08-18 12:58:46,954][inference][INFO] - + Forward pass throughput: 275.00 (samples/s) -[2023-08-18 12:58:46,955][inference][INFO] - Saving inference results -[2023-08-18 12:58:46,967][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-18_11:39:23_bc3e20dcf08a03e22a0e4a42a0ce5a8ec94180e5/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-18_11:39:23_bc3e20dcf08a03e22a0e4a42a0ce5a8ec94180e5/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index fc02352dc97d220d55fe04bc4edef4200110f22a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_11:39:23_bc3e20dcf08a03e22a0e4a42a0ce5a8ec94180e5/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_11:39:23_bc3e20dcf08a03e22a0e4a42a0ce5a8ec94180e5/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-18_11:39:23_bc3e20dcf08a03e22a0e4a42a0ce5a8ec94180e5/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 4f2d895ea4f7c85476460eb5fb1cd459238d4e7c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_11:39:23_bc3e20dcf08a03e22a0e4a42a0ce5a8ec94180e5/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-18_11:39:23_bc3e20dcf08a03e22a0e4a42a0ce5a8ec94180e5/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-18_11:39:23_bc3e20dcf08a03e22a0e4a42a0ce5a8ec94180e5/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-08-18_11:39:23_bc3e20dcf08a03e22a0e4a42a0ce5a8ec94180e5/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_11:39:23_bc3e20dcf08a03e22a0e4a42a0ce5a8ec94180e5/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-18_11:39:23_bc3e20dcf08a03e22a0e4a42a0ce5a8ec94180e5/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-18_11:39:23_bc3e20dcf08a03e22a0e4a42a0ce5a8ec94180e5/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 819b1876e26fdd758df23050ecd02bd8d430c90c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_11:39:23_bc3e20dcf08a03e22a0e4a42a0ce5a8ec94180e5/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_11:39:23_bc3e20dcf08a03e22a0e4a42a0ce5a8ec94180e5/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-18_11:39:23_bc3e20dcf08a03e22a0e4a42a0ce5a8ec94180e5/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 3570a0b99dd9e2c4e629d56e33be99d0a52ea061..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_11:39:23_bc3e20dcf08a03e22a0e4a42a0ce5a8ec94180e5/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.64383999999995,0.00412,971.0 diff --git a/raw_results/2023-08-18_11:39:23_bc3e20dcf08a03e22a0e4a42a0ce5a8ec94180e5/pytorch_bert_inference/1/main.log b/raw_results/2023-08-18_11:39:23_bc3e20dcf08a03e22a0e4a42a0ce5a8ec94180e5/pytorch_bert_inference/1/main.log deleted file mode 100644 index 5938b73beea9ed6ebf50299f7a00e019481cbcdd..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-18_11:39:23_bc3e20dcf08a03e22a0e4a42a0ce5a8ec94180e5/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-18 12:58:47,349][benchmark][INFO] - Configuring inference benchmark -[2023-08-18 12:58:47,350][benchmark][INFO] - + Setting seed(42) -[2023-08-18 12:58:47,781][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-18 12:58:47,781][backend][INFO] - Configuring pytorch backend -[2023-08-18 12:58:47,781][backend][INFO] - + Checking initial device isolation -[2023-08-18 12:58:47,781][backend][INFO] - + Checking contineous device isolation -[2023-08-18 12:58:47,782][pytorch][INFO] - + Disabling gradients -[2023-08-18 12:58:47,782][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-18 12:58:47,899][pytorch][INFO] - + Turning on eval mode -[2023-08-18 12:58:47,900][inference][INFO] - Running inference benchmark -[2023-08-18 12:58:48,039][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-18 12:58:48,041][inference][INFO] - + Tracking forward pass peak memory -[2023-08-18 12:58:48,086][inference][INFO] - + Forward pass peak memory: 468.64383999999995 (MB) -[2023-08-18 12:58:48,087][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-18 12:58:48,089][inference][INFO] - + Warming up the forward pass -[2023-08-18 12:58:48,131][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-18 12:58:53,172][inference][INFO] - + Forward pass latency: 4.12e-03 (s) -[2023-08-18 12:58:53,173][inference][INFO] - + Forward pass throughput: 971.00 (samples/s) -[2023-08-18 12:58:53,173][inference][INFO] - Saving inference results -[2023-08-18 12:58:53,182][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-18_11:39:23_bc3e20dcf08a03e22a0e4a42a0ce5a8ec94180e5/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-18_11:39:23_bc3e20dcf08a03e22a0e4a42a0ce5a8ec94180e5/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 39258ef78f16622f3cbf970c22d0b92229e7cc16..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_11:39:23_bc3e20dcf08a03e22a0e4a42a0ce5a8ec94180e5/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference 
-model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_11:39:23_bc3e20dcf08a03e22a0e4a42a0ce5a8ec94180e5/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-18_11:39:23_bc3e20dcf08a03e22a0e4a42a0ce5a8ec94180e5/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 148028845ad2cac92f3adbd933dd0bbb1815130b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_11:39:23_bc3e20dcf08a03e22a0e4a42a0ce5a8ec94180e5/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-18_11:39:23_bc3e20dcf08a03e22a0e4a42a0ce5a8ec94180e5/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-18_11:39:23_bc3e20dcf08a03e22a0e4a42a0ce5a8ec94180e5/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-18_11:39:23_bc3e20dcf08a03e22a0e4a42a0ce5a8ec94180e5/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_11:39:23_bc3e20dcf08a03e22a0e4a42a0ce5a8ec94180e5/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-18_11:39:23_bc3e20dcf08a03e22a0e4a42a0ce5a8ec94180e5/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-18_11:39:23_bc3e20dcf08a03e22a0e4a42a0ce5a8ec94180e5/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index cb6bbe7e2787ba735853eaeafaa72ec86761d726..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_11:39:23_bc3e20dcf08a03e22a0e4a42a0ce5a8ec94180e5/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
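The hydra.yaml above also explains the directory layout used throughout raw_results/: the sweep dir interpolates the COMMIT_DATE_GMT and COMMIT_SHA environment variables set by the CI job, and ${hydra.job.num} becomes the numeric subdir. A hedged sketch of that composition in plain Python (not the actual Hydra interpolation machinery):

```python
# Illustrative only: how the run directories seen in this diff are composed
# from the hydra.yaml interpolations. The env var values match this commit.
import os

os.environ.setdefault("COMMIT_DATE_GMT", "2023-08-18_11:39:23")
os.environ.setdefault("COMMIT_SHA", "bc3e20dcf08a03e22a0e4a42a0ce5a8ec94180e5")

experiment_name = "pytorch_gpt2_inference"
job_num = 0  # ${hydra.job.num}: one subdir per sweep point

sweep_dir = (
    f"sweeps/{os.environ['COMMIT_DATE_GMT']}_{os.environ['COMMIT_SHA']}"
    f"/{experiment_name}/{job_num}"
)
print(sweep_dir)
# sweeps/2023-08-18_11:39:23_bc3e20dcf08a03e22a0e4a42a0ce5a8ec94180e5/pytorch_gpt2_inference/0
```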
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_11:39:23_bc3e20dcf08a03e22a0e4a42a0ce5a8ec94180e5/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-18_11:39:23_bc3e20dcf08a03e22a0e4a42a0ce5a8ec94180e5/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 2b4f85da956ec332e27d1869a4a14ac98d22a15c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_11:39:23_bc3e20dcf08a03e22a0e4a42a0ce5a8ec94180e5/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,468.844544,0.00306,327.0,0.481,208.0 diff --git a/raw_results/2023-08-18_11:39:23_bc3e20dcf08a03e22a0e4a42a0ce5a8ec94180e5/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-18_11:39:23_bc3e20dcf08a03e22a0e4a42a0ce5a8ec94180e5/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 653db73eade202a3e8fefc08c8177ac32b545a5f..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_11:39:23_bc3e20dcf08a03e22a0e4a42a0ce5a8ec94180e5/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-18 12:58:57,972][benchmark][INFO] - Configuring inference benchmark -[2023-08-18 12:58:57,972][benchmark][INFO] - + Setting seed(42) -[2023-08-18 12:58:59,533][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-18 12:58:59,533][backend][INFO] - Configuring pytorch backend -[2023-08-18 12:58:59,534][backend][INFO] - + Checking initial device isolation -[2023-08-18 12:58:59,534][backend][INFO] - + Checking contineous device isolation -[2023-08-18 12:58:59,534][pytorch][INFO] - + Disabling gradients -[2023-08-18 12:58:59,534][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-18 12:59:00,363][pytorch][INFO] - + Turning on eval mode -[2023-08-18 12:59:00,363][inference][INFO] - Running inference benchmark -[2023-08-18 12:59:00,561][inference][INFO] - + Tracking forward pass peak memory -[2023-08-18 12:59:00,605][inference][INFO] - + Forward pass peak memory: 468.844544 (MB) -[2023-08-18 12:59:00,606][inference][INFO] - + Warming up the forward pass 
-[2023-08-18 12:59:00,638][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-18 12:59:05,690][inference][INFO] - + Forward pass latency: 3.06e-03 (s) -[2023-08-18 12:59:05,692][inference][INFO] - + Forward pass throughput: 327.00 (samples/s) -[2023-08-18 12:59:05,692][inference][INFO] - + Warming up the generation pass -[2023-08-18 12:59:06,176][inference][INFO] - + Tracking generation latency and throughput -[2023-08-18 12:59:11,469][inference][INFO] - + Generation pass latency: 4.81e-01 (s) -[2023-08-18 12:59:11,470][inference][INFO] - + Generation pass throughput: 208.00 (tokens/s) -[2023-08-18 12:59:11,470][inference][INFO] - Saving inference results -[2023-08-18 12:59:11,482][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-18_12:27:16_8d2f953f4a59a6a6f337a75ef75bb8a78260ef73/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-18_12:27:16_8d2f953f4a59a6a6f337a75ef75bb8a78260ef73/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index 8555d6744dd0e5881f14d8db6ba7f34fd0488b3b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_12:27:16_8d2f953f4a59a6a6f337a75ef75bb8a78260ef73/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_12:27:16_8d2f953f4a59a6a6f337a75ef75bb8a78260ef73/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-18_12:27:16_8d2f953f4a59a6a6f337a75ef75bb8a78260ef73/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index b140429b9cdda6b17725e0fcc91fc53301b94bd0..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_12:27:16_8d2f953f4a59a6a6f337a75ef75bb8a78260ef73/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
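The GPT-2 log above reports a generation pass of 4.81e-01 s at 208 tokens/s; with new_tokens: 100 in the benchmark config, the reported throughput is consistent with new_tokens divided by generation latency. A one-line check (the formula is inferred from these numbers, not confirmed by the source):

```python
# Consistency check on the generation metrics above, assuming the reported
# throughput is new_tokens / generate latency (batch_size is 1 for this run).
new_tokens = 100           # benchmark.input_shapes / generation config above
generate_latency_s = 0.481
print(round(new_tokens / generate_latency_s))  # 208, matching 208.00 tokens/s
```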
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-18_12:27:16_8d2f953f4a59a6a6f337a75ef75bb8a78260ef73/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-18_12:27:16_8d2f953f4a59a6a6f337a75ef75bb8a78260ef73/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-18_12:27:16_8d2f953f4a59a6a6f337a75ef75bb8a78260ef73/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_12:27:16_8d2f953f4a59a6a6f337a75ef75bb8a78260ef73/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-18_12:27:16_8d2f953f4a59a6a6f337a75ef75bb8a78260ef73/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-18_12:27:16_8d2f953f4a59a6a6f337a75ef75bb8a78260ef73/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 8748d22fe3c2d5758d4d82aa74516bd5a24d74c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_12:27:16_8d2f953f4a59a6a6f337a75ef75bb8a78260ef73/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_12:27:16_8d2f953f4a59a6a6f337a75ef75bb8a78260ef73/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-18_12:27:16_8d2f953f4a59a6a6f337a75ef75bb8a78260ef73/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 668b99b3998060b83208e11e8fa2f075f088d700..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_12:27:16_8d2f953f4a59a6a6f337a75ef75bb8a78260ef73/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.84159999999997,0.0036,278.0 diff --git a/raw_results/2023-08-18_12:27:16_8d2f953f4a59a6a6f337a75ef75bb8a78260ef73/pytorch_bert_inference/0/main.log b/raw_results/2023-08-18_12:27:16_8d2f953f4a59a6a6f337a75ef75bb8a78260ef73/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
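Note that hydra_config.yaml above shows disable_grad and eval_mode already resolved to true, whereas the corresponding config.yaml entries are ${is_inference:${benchmark.name}} interpolations. A sketch of how such a custom OmegaConf resolver yields that resolution; the resolver name matches the config, but its body here is an assumption reconstructed from the observed output:

```python
# Sketch: a custom OmegaConf resolver turning ${is_inference:${benchmark.name}}
# into the literal true seen in the resolved hydra_config.yaml files.
from omegaconf import OmegaConf

OmegaConf.register_new_resolver("is_inference", lambda name: name == "inference")

cfg = OmegaConf.create(
    {
        "benchmark": {"name": "inference"},
        "backend": {
            "disable_grad": "${is_inference:${benchmark.name}}",
            "eval_mode": "${is_inference:${benchmark.name}}",
        },
    }
)
print(cfg.backend.disable_grad, cfg.backend.eval_mode)  # True True
```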
f4dd85885a414ae8ec0f4c5fdccc9cb93dc85923..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_12:27:16_8d2f953f4a59a6a6f337a75ef75bb8a78260ef73/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-18 13:00:15,277][benchmark][INFO] - Configuring inference benchmark -[2023-08-18 13:00:15,278][benchmark][INFO] - + Setting seed(42) -[2023-08-18 13:00:16,668][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-18 13:00:16,668][backend][INFO] - Configuring pytorch backend -[2023-08-18 13:00:16,668][backend][INFO] - + Checking initial device isolation -[2023-08-18 13:00:16,669][backend][INFO] - + Checking contineous device isolation -[2023-08-18 13:00:16,669][pytorch][INFO] - + Disabling gradients -[2023-08-18 13:00:16,669][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-18 13:00:17,281][pytorch][INFO] - + Turning on eval mode -[2023-08-18 13:00:17,281][inference][INFO] - Running inference benchmark -[2023-08-18 13:00:17,402][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-18 13:00:17,403][inference][INFO] - + Tracking forward pass peak memory -[2023-08-18 13:00:17,461][inference][INFO] - + Forward pass peak memory: 466.84159999999997 (MB) -[2023-08-18 13:00:17,462][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-18 13:00:17,464][inference][INFO] - + Warming up the forward pass -[2023-08-18 13:00:17,495][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-18 13:00:22,543][inference][INFO] - + Forward pass latency: 3.60e-03 (s) -[2023-08-18 13:00:22,544][inference][INFO] - + Forward pass throughput: 278.00 (samples/s) -[2023-08-18 13:00:22,544][inference][INFO] - Saving inference results -[2023-08-18 13:00:22,555][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-18_12:27:16_8d2f953f4a59a6a6f337a75ef75bb8a78260ef73/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-18_12:27:16_8d2f953f4a59a6a6f337a75ef75bb8a78260ef73/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index fc02352dc97d220d55fe04bc4edef4200110f22a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_12:27:16_8d2f953f4a59a6a6f337a75ef75bb8a78260ef73/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_12:27:16_8d2f953f4a59a6a6f337a75ef75bb8a78260ef73/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-18_12:27:16_8d2f953f4a59a6a6f337a75ef75bb8a78260ef73/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index ee7aa35faa589817573609f21d029e2761d69002..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_12:27:16_8d2f953f4a59a6a6f337a75ef75bb8a78260ef73/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
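The sweeper params block above (benchmark.input_shapes.batch_size: 1,4) is what fans each bert experiment out into subdirs 0 and 1. A rough sketch of the expansion Hydra's BasicSweeper performs for a single swept key (the real sweeper also takes the cartesian product across multiple keys):

```python
# Rough sketch of BasicSweeper expansion: one override list per job number,
# which is why each experiment directory has subdirs 0 (batch 1) and 1 (batch 4).
params = {"benchmark.input_shapes.batch_size": "1,4"}

jobs = []
for key, values in params.items():
    for num, value in enumerate(values.split(",")):
        jobs.append((num, [f"{key}={value}"]))

print(jobs)
# [(0, ['benchmark.input_shapes.batch_size=1']),
#  (1, ['benchmark.input_shapes.batch_size=4'])]
```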
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-18_12:27:16_8d2f953f4a59a6a6f337a75ef75bb8a78260ef73/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-18_12:27:16_8d2f953f4a59a6a6f337a75ef75bb8a78260ef73/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-18_12:27:16_8d2f953f4a59a6a6f337a75ef75bb8a78260ef73/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_12:27:16_8d2f953f4a59a6a6f337a75ef75bb8a78260ef73/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-18_12:27:16_8d2f953f4a59a6a6f337a75ef75bb8a78260ef73/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-18_12:27:16_8d2f953f4a59a6a6f337a75ef75bb8a78260ef73/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 819b1876e26fdd758df23050ecd02bd8d430c90c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_12:27:16_8d2f953f4a59a6a6f337a75ef75bb8a78260ef73/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_12:27:16_8d2f953f4a59a6a6f337a75ef75bb8a78260ef73/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-18_12:27:16_8d2f953f4a59a6a6f337a75ef75bb8a78260ef73/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 161302a9ff728451cc2515138d3f50d6bc5d0eee..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_12:27:16_8d2f953f4a59a6a6f337a75ef75bb8a78260ef73/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.91475199999996,0.00416,962.0 diff --git a/raw_results/2023-08-18_12:27:16_8d2f953f4a59a6a6f337a75ef75bb8a78260ef73/pytorch_bert_inference/1/main.log b/raw_results/2023-08-18_12:27:16_8d2f953f4a59a6a6f337a75ef75bb8a78260ef73/pytorch_bert_inference/1/main.log deleted file mode 100644 index dd48ec875243bd8d4bebab83d96ba5338b27e670..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_12:27:16_8d2f953f4a59a6a6f337a75ef75bb8a78260ef73/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-18 13:00:22,933][benchmark][INFO] - Configuring inference benchmark -[2023-08-18 13:00:22,934][benchmark][INFO] - + Setting seed(42) -[2023-08-18 13:00:23,384][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-18 13:00:23,385][backend][INFO] - Configuring pytorch backend -[2023-08-18 13:00:23,385][backend][INFO] - + Checking initial device isolation -[2023-08-18 13:00:23,385][backend][INFO] - + Checking contineous device isolation -[2023-08-18 13:00:23,385][pytorch][INFO] - + Disabling gradients -[2023-08-18 13:00:23,385][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-18 13:00:23,512][pytorch][INFO] - + Turning on eval mode -[2023-08-18 13:00:23,513][inference][INFO] - Running inference benchmark -[2023-08-18 13:00:23,645][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-18 13:00:23,646][inference][INFO] - + Tracking forward 
pass peak memory -[2023-08-18 13:00:23,689][inference][INFO] - + Forward pass peak memory: 467.91475199999996 (MB) -[2023-08-18 13:00:23,690][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-18 13:00:23,692][inference][INFO] - + Warming up the forward pass -[2023-08-18 13:00:23,734][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-18 13:00:28,777][inference][INFO] - + Forward pass latency: 4.16e-03 (s) -[2023-08-18 13:00:28,778][inference][INFO] - + Forward pass throughput: 962.00 (samples/s) -[2023-08-18 13:00:28,778][inference][INFO] - Saving inference results -[2023-08-18 13:00:28,786][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-18_12:27:16_8d2f953f4a59a6a6f337a75ef75bb8a78260ef73/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-18_12:27:16_8d2f953f4a59a6a6f337a75ef75bb8a78260ef73/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 39258ef78f16622f3cbf970c22d0b92229e7cc16..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_12:27:16_8d2f953f4a59a6a6f337a75ef75bb8a78260ef73/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_12:27:16_8d2f953f4a59a6a6f337a75ef75bb8a78260ef73/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-18_12:27:16_8d2f953f4a59a6a6f337a75ef75bb8a78260ef73/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index b0fd9d25b1912c25cc1dbf45a2a041bfc20a7da9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_12:27:16_8d2f953f4a59a6a6f337a75ef75bb8a78260ef73/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - 
launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-18_12:27:16_8d2f953f4a59a6a6f337a75ef75bb8a78260ef73/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-18_12:27:16_8d2f953f4a59a6a6f337a75ef75bb8a78260ef73/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-18_12:27:16_8d2f953f4a59a6a6f337a75ef75bb8a78260ef73/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_12:27:16_8d2f953f4a59a6a6f337a75ef75bb8a78260ef73/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-18_12:27:16_8d2f953f4a59a6a6f337a75ef75bb8a78260ef73/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-18_12:27:16_8d2f953f4a59a6a6f337a75ef75bb8a78260ef73/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index cb6bbe7e2787ba735853eaeafaa72ec86761d726..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_12:27:16_8d2f953f4a59a6a6f337a75ef75bb8a78260ef73/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_12:27:16_8d2f953f4a59a6a6f337a75ef75bb8a78260ef73/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-18_12:27:16_8d2f953f4a59a6a6f337a75ef75bb8a78260ef73/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 85b578969e790086cdbcfe3bb07e01d298459953..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_12:27:16_8d2f953f4a59a6a6f337a75ef75bb8a78260ef73/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.27462399999996,0.00372,269.0,0.563,178.0 diff --git a/raw_results/2023-08-18_12:27:16_8d2f953f4a59a6a6f337a75ef75bb8a78260ef73/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-18_12:27:16_8d2f953f4a59a6a6f337a75ef75bb8a78260ef73/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 9ef5d5a95c043ae82fabc688bc8f43ddaf4ab69b..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-18_12:27:16_8d2f953f4a59a6a6f337a75ef75bb8a78260ef73/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-18 13:00:33,640][benchmark][INFO] - Configuring inference benchmark -[2023-08-18 13:00:33,641][benchmark][INFO] - + Setting seed(42) -[2023-08-18 13:00:35,135][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-18 13:00:35,135][backend][INFO] - Configuring pytorch backend -[2023-08-18 13:00:35,136][backend][INFO] - + Checking initial device isolation -[2023-08-18 13:00:35,136][backend][INFO] - + Checking contineous device isolation -[2023-08-18 13:00:35,136][pytorch][INFO] - + Disabling gradients -[2023-08-18 13:00:35,136][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-18 13:00:35,763][pytorch][INFO] - + Turning on eval mode -[2023-08-18 13:00:35,763][inference][INFO] - Running inference benchmark -[2023-08-18 13:00:35,966][inference][INFO] - + Tracking forward pass peak memory -[2023-08-18 13:00:36,016][inference][INFO] - + Forward pass peak memory: 469.27462399999996 (MB) -[2023-08-18 13:00:36,017][inference][INFO] - + Warming up the forward pass -[2023-08-18 13:00:36,049][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-18 13:00:41,094][inference][INFO] - + Forward pass latency: 3.72e-03 (s) -[2023-08-18 13:00:41,095][inference][INFO] - + Forward pass throughput: 269.00 (samples/s) -[2023-08-18 13:00:41,096][inference][INFO] - + Warming up the generation pass -[2023-08-18 13:00:41,683][inference][INFO] - + Tracking generation latency and throughput -[2023-08-18 13:00:46,751][inference][INFO] - + Generation pass latency: 5.63e-01 (s) -[2023-08-18 13:00:46,752][inference][INFO] - + Generation pass throughput: 178.00 (tokens/s) -[2023-08-18 13:00:46,752][inference][INFO] - Saving inference results -[2023-08-18 13:00:46,764][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-18_13:57:17_636acc75b089aa3ce14b48ed3d9d6555565d1a6d/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-18_13:57:17_636acc75b089aa3ce14b48ed3d9d6555565d1a6d/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index 8555d6744dd0e5881f14d8db6ba7f34fd0488b3b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_13:57:17_636acc75b089aa3ce14b48ed3d9d6555565d1a6d/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
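With several commit directories now in view, the per-run CSVs can be aggregated into one table to compare forward latency across commits. A hedged pandas sketch assuming the raw_results/<date>_<sha>/<experiment>/<job>/inference_results.csv layout seen throughout this diff:

```python
# Hedged sketch: collect the scattered bert results into a single DataFrame
# keyed by commit directory and sweep job, for cross-commit comparison.
from pathlib import Path
import pandas as pd

rows = []
for csv in Path("raw_results").glob("*/pytorch_bert_inference/*/inference_results.csv"):
    commit_dir = csv.parents[2].name  # e.g. 2023-08-18_12:27:16_8d2f953f...
    job = csv.parent.name             # sweep subdir: "0" (batch 1) or "1" (batch 4)
    df = pd.read_csv(csv, index_col=0)
    rows.append(
        {
            "commit": commit_dir,
            "job": job,
            "latency_s": df["forward.latency(s)"].iloc[0],
            "throughput": df["forward.throughput(samples/s)"].iloc[0],
        }
    )

print(pd.DataFrame(rows).sort_values(["commit", "job"]))
```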
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_13:57:17_636acc75b089aa3ce14b48ed3d9d6555565d1a6d/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-18_13:57:17_636acc75b089aa3ce14b48ed3d9d6555565d1a6d/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 02e9a22fe226cce3260982e184036d89718e32b8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_13:57:17_636acc75b089aa3ce14b48ed3d9d6555565d1a6d/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-18_13:57:17_636acc75b089aa3ce14b48ed3d9d6555565d1a6d/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-18_13:57:17_636acc75b089aa3ce14b48ed3d9d6555565d1a6d/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-18_13:57:17_636acc75b089aa3ce14b48ed3d9d6555565d1a6d/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_13:57:17_636acc75b089aa3ce14b48ed3d9d6555565d1a6d/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-18_13:57:17_636acc75b089aa3ce14b48ed3d9d6555565d1a6d/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-18_13:57:17_636acc75b089aa3ce14b48ed3d9d6555565d1a6d/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 8748d22fe3c2d5758d4d82aa74516bd5a24d74c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_13:57:17_636acc75b089aa3ce14b48ed3d9d6555565d1a6d/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_13:57:17_636acc75b089aa3ce14b48ed3d9d6555565d1a6d/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-18_13:57:17_636acc75b089aa3ce14b48ed3d9d6555565d1a6d/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 792cca36298862d63e61ba513b8449ad3befbd5e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_13:57:17_636acc75b089aa3ce14b48ed3d9d6555565d1a6d/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.262912,0.00364,275.0 diff --git a/raw_results/2023-08-18_13:57:17_636acc75b089aa3ce14b48ed3d9d6555565d1a6d/pytorch_bert_inference/0/main.log b/raw_results/2023-08-18_13:57:17_636acc75b089aa3ce14b48ed3d9d6555565d1a6d/pytorch_bert_inference/0/main.log deleted file mode 100644 index 01cb4c6e3821ea7fb0fbdb428007588480c85881..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_13:57:17_636acc75b089aa3ce14b48ed3d9d6555565d1a6d/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-18 14:49:52,747][benchmark][INFO] - Configuring inference benchmark -[2023-08-18 14:49:52,747][benchmark][INFO] - + Setting seed(42) -[2023-08-18 14:49:55,032][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-18 14:49:55,032][backend][INFO] - Configuring pytorch backend -[2023-08-18 14:49:55,033][backend][INFO] - + Checking initial device isolation -[2023-08-18 14:49:55,033][backend][INFO] - + Checking contineous device isolation -[2023-08-18 14:49:55,033][pytorch][INFO] - + Disabling gradients -[2023-08-18 14:49:55,033][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-18 14:49:55,929][pytorch][INFO] - + Turning on eval mode -[2023-08-18 14:49:55,930][inference][INFO] - Running inference benchmark -[2023-08-18 14:49:56,122][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-18 14:49:56,124][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-18 14:49:56,180][inference][INFO] - + Forward pass peak memory: 468.262912 (MB) -[2023-08-18 14:49:56,181][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-18 14:49:56,183][inference][INFO] - + Warming up the forward pass -[2023-08-18 14:49:56,218][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-18 14:50:01,266][inference][INFO] - + Forward pass latency: 3.64e-03 (s) -[2023-08-18 14:50:01,267][inference][INFO] - + Forward pass throughput: 275.00 (samples/s) -[2023-08-18 14:50:01,267][inference][INFO] - Saving inference results -[2023-08-18 14:50:01,277][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-18_13:57:17_636acc75b089aa3ce14b48ed3d9d6555565d1a6d/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-18_13:57:17_636acc75b089aa3ce14b48ed3d9d6555565d1a6d/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index fc02352dc97d220d55fe04bc4edef4200110f22a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_13:57:17_636acc75b089aa3ce14b48ed3d9d6555565d1a6d/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_13:57:17_636acc75b089aa3ce14b48ed3d9d6555565d1a6d/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-18_13:57:17_636acc75b089aa3ce14b48ed3d9d6555565d1a6d/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 452415b2670d2ddef07887130a5e08d36feea39b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_13:57:17_636acc75b089aa3ce14b48ed3d9d6555565d1a6d/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-18_13:57:17_636acc75b089aa3ce14b48ed3d9d6555565d1a6d/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-18_13:57:17_636acc75b089aa3ce14b48ed3d9d6555565d1a6d/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-08-18_13:57:17_636acc75b089aa3ce14b48ed3d9d6555565d1a6d/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_13:57:17_636acc75b089aa3ce14b48ed3d9d6555565d1a6d/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-18_13:57:17_636acc75b089aa3ce14b48ed3d9d6555565d1a6d/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-18_13:57:17_636acc75b089aa3ce14b48ed3d9d6555565d1a6d/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 819b1876e26fdd758df23050ecd02bd8d430c90c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_13:57:17_636acc75b089aa3ce14b48ed3d9d6555565d1a6d/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_13:57:17_636acc75b089aa3ce14b48ed3d9d6555565d1a6d/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-18_13:57:17_636acc75b089aa3ce14b48ed3d9d6555565d1a6d/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 1c12c405bc74660a0f558f87f767d961c7d3ad96..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_13:57:17_636acc75b089aa3ce14b48ed3d9d6555565d1a6d/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,469.315584,0.00336,1190.0 diff --git a/raw_results/2023-08-18_13:57:17_636acc75b089aa3ce14b48ed3d9d6555565d1a6d/pytorch_bert_inference/1/main.log b/raw_results/2023-08-18_13:57:17_636acc75b089aa3ce14b48ed3d9d6555565d1a6d/pytorch_bert_inference/1/main.log deleted file mode 100644 index 076fb73949fe6ef663e34a84af4f8cec4ad5a129..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-18_13:57:17_636acc75b089aa3ce14b48ed3d9d6555565d1a6d/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-18 14:50:01,724][benchmark][INFO] - Configuring inference benchmark -[2023-08-18 14:50:01,724][benchmark][INFO] - + Setting seed(42) -[2023-08-18 14:50:02,561][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-18 14:50:02,561][backend][INFO] - Configuring pytorch backend -[2023-08-18 14:50:02,562][backend][INFO] - + Checking initial device isolation -[2023-08-18 14:50:02,562][backend][INFO] - + Checking contineous device isolation -[2023-08-18 14:50:02,562][pytorch][INFO] - + Disabling gradients -[2023-08-18 14:50:02,562][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-18 14:50:02,750][pytorch][INFO] - + Turning on eval mode -[2023-08-18 14:50:02,751][inference][INFO] - Running inference benchmark -[2023-08-18 14:50:02,942][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-18 14:50:02,944][inference][INFO] - + Tracking forward pass peak memory -[2023-08-18 14:50:02,981][inference][INFO] - + Forward pass peak memory: 469.315584 (MB) -[2023-08-18 14:50:02,982][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-18 14:50:02,984][inference][INFO] - + Warming up the forward pass -[2023-08-18 14:50:03,018][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-18 14:50:08,066][inference][INFO] - + Forward pass latency: 3.36e-03 (s) -[2023-08-18 14:50:08,067][inference][INFO] - + Forward pass throughput: 1190.00 (samples/s) -[2023-08-18 14:50:08,068][inference][INFO] - Saving inference results -[2023-08-18 14:50:08,075][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-18_13:57:17_636acc75b089aa3ce14b48ed3d9d6555565d1a6d/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-18_13:57:17_636acc75b089aa3ce14b48ed3d9d6555565d1a6d/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 39258ef78f16622f3cbf970c22d0b92229e7cc16..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_13:57:17_636acc75b089aa3ce14b48ed3d9d6555565d1a6d/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: 
hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_13:57:17_636acc75b089aa3ce14b48ed3d9d6555565d1a6d/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-18_13:57:17_636acc75b089aa3ce14b48ed3d9d6555565d1a6d/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 3e862de2256526f28c687784fbf1ff5271766892..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_13:57:17_636acc75b089aa3ce14b48ed3d9d6555565d1a6d/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-18_13:57:17_636acc75b089aa3ce14b48ed3d9d6555565d1a6d/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-18_13:57:17_636acc75b089aa3ce14b48ed3d9d6555565d1a6d/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-18_13:57:17_636acc75b089aa3ce14b48ed3d9d6555565d1a6d/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_13:57:17_636acc75b089aa3ce14b48ed3d9d6555565d1a6d/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-18_13:57:17_636acc75b089aa3ce14b48ed3d9d6555565d1a6d/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-18_13:57:17_636acc75b089aa3ce14b48ed3d9d6555565d1a6d/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index cb6bbe7e2787ba735853eaeafaa72ec86761d726..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_13:57:17_636acc75b089aa3ce14b48ed3d9d6555565d1a6d/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_13:57:17_636acc75b089aa3ce14b48ed3d9d6555565d1a6d/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-18_13:57:17_636acc75b089aa3ce14b48ed3d9d6555565d1a6d/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index e7306ec6054991bc9d4670d7424005c71f9a3a89..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_13:57:17_636acc75b089aa3ce14b48ed3d9d6555565d1a6d/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,468.81587199999996,0.00452,221.0,0.494,202.0 diff --git a/raw_results/2023-08-18_13:57:17_636acc75b089aa3ce14b48ed3d9d6555565d1a6d/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-18_13:57:17_636acc75b089aa3ce14b48ed3d9d6555565d1a6d/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 253bdcdcc8f3f7e2bb62e986ac2948466af82744..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_13:57:17_636acc75b089aa3ce14b48ed3d9d6555565d1a6d/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-18 14:50:12,821][benchmark][INFO] - Configuring inference benchmark -[2023-08-18 14:50:12,821][benchmark][INFO] - + Setting seed(42) -[2023-08-18 14:50:14,488][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-18 14:50:14,488][backend][INFO] - Configuring pytorch backend -[2023-08-18 14:50:14,488][backend][INFO] - + Checking initial device isolation -[2023-08-18 14:50:14,488][backend][INFO] - + Checking contineous device isolation -[2023-08-18 14:50:14,488][pytorch][INFO] - + Disabling gradients -[2023-08-18 14:50:14,489][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-18 14:50:15,150][pytorch][INFO] - + Turning on eval mode -[2023-08-18 14:50:15,151][inference][INFO] - Running inference benchmark -[2023-08-18 14:50:15,377][inference][INFO] - + Tracking forward pass peak memory -[2023-08-18 14:50:15,422][inference][INFO] - + Forward pass peak memory: 468.81587199999996 (MB) -[2023-08-18 14:50:15,423][inference][INFO] - + Warming up the 
forward pass -[2023-08-18 14:50:15,460][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-18 14:50:20,506][inference][INFO] - + Forward pass latency: 4.52e-03 (s) -[2023-08-18 14:50:20,507][inference][INFO] - + Forward pass throughput: 221.00 (samples/s) -[2023-08-18 14:50:20,508][inference][INFO] - + Warming up the generation pass -[2023-08-18 14:50:21,079][inference][INFO] - + Tracking generation latency and throughput -[2023-08-18 14:50:26,510][inference][INFO] - + Generation pass latency: 4.94e-01 (s) -[2023-08-18 14:50:26,511][inference][INFO] - + Generation pass throughput: 202.00 (tokens/s) -[2023-08-18 14:50:26,511][inference][INFO] - Saving inference results -[2023-08-18 14:50:26,524][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-18_16:09:50_ef1534252f76231b4a6403c71866d4376e35292d/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-18_16:09:50_ef1534252f76231b4a6403c71866d4376e35292d/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index 8555d6744dd0e5881f14d8db6ba7f34fd0488b3b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_16:09:50_ef1534252f76231b4a6403c71866d4376e35292d/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_16:09:50_ef1534252f76231b4a6403c71866d4376e35292d/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-18_16:09:50_ef1534252f76231b4a6403c71866d4376e35292d/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 6d4b0134bd7114349b65100511ddc29e0cf594e4..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_16:09:50_ef1534252f76231b4a6403c71866d4376e35292d/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-18_16:09:50_ef1534252f76231b4a6403c71866d4376e35292d/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-18_16:09:50_ef1534252f76231b4a6403c71866d4376e35292d/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-18_16:09:50_ef1534252f76231b4a6403c71866d4376e35292d/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_16:09:50_ef1534252f76231b4a6403c71866d4376e35292d/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-18_16:09:50_ef1534252f76231b4a6403c71866d4376e35292d/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-18_16:09:50_ef1534252f76231b4a6403c71866d4376e35292d/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 8748d22fe3c2d5758d4d82aa74516bd5a24d74c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_16:09:50_ef1534252f76231b4a6403c71866d4376e35292d/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_16:09:50_ef1534252f76231b4a6403c71866d4376e35292d/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-18_16:09:50_ef1534252f76231b4a6403c71866d4376e35292d/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 06cabff023ef2a819041097eee175076d24366c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_16:09:50_ef1534252f76231b4a6403c71866d4376e35292d/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.083264,0.00323,310.0 diff --git a/raw_results/2023-08-18_16:09:50_ef1534252f76231b4a6403c71866d4376e35292d/pytorch_bert_inference/0/main.log b/raw_results/2023-08-18_16:09:50_ef1534252f76231b4a6403c71866d4376e35292d/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
5762123a686f09f418688ae24ce5eb5872a6d160..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_16:09:50_ef1534252f76231b4a6403c71866d4376e35292d/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-18 16:49:50,158][benchmark][INFO] - Configuring inference benchmark -[2023-08-18 16:49:50,160][benchmark][INFO] - + Setting seed(42) -[2023-08-18 16:49:51,376][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-18 16:49:51,376][backend][INFO] - Configuring pytorch backend -[2023-08-18 16:49:51,376][backend][INFO] - + Checking initial device isolation -[2023-08-18 16:49:51,377][backend][INFO] - + Checking contineous device isolation -[2023-08-18 16:49:51,377][pytorch][INFO] - + Disabling gradients -[2023-08-18 16:49:51,377][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-18 16:49:52,165][pytorch][INFO] - + Turning on eval mode -[2023-08-18 16:49:52,166][inference][INFO] - Running inference benchmark -[2023-08-18 16:49:52,287][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-18 16:49:52,289][inference][INFO] - + Tracking forward pass peak memory -[2023-08-18 16:49:52,350][inference][INFO] - + Forward pass peak memory: 467.083264 (MB) -[2023-08-18 16:49:52,351][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-18 16:49:52,353][inference][INFO] - + Warming up the forward pass -[2023-08-18 16:49:52,390][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-18 16:49:57,439][inference][INFO] - + Forward pass latency: 3.23e-03 (s) -[2023-08-18 16:49:57,440][inference][INFO] - + Forward pass throughput: 310.00 (samples/s) -[2023-08-18 16:49:57,441][inference][INFO] - Saving inference results -[2023-08-18 16:49:57,451][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-18_16:09:50_ef1534252f76231b4a6403c71866d4376e35292d/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-18_16:09:50_ef1534252f76231b4a6403c71866d4376e35292d/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index fc02352dc97d220d55fe04bc4edef4200110f22a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_16:09:50_ef1534252f76231b4a6403c71866d4376e35292d/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_16:09:50_ef1534252f76231b4a6403c71866d4376e35292d/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-18_16:09:50_ef1534252f76231b4a6403c71866d4376e35292d/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 9f8f4e234bcbfde582770bb7135dd65978720883..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_16:09:50_ef1534252f76231b4a6403c71866d4376e35292d/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-18_16:09:50_ef1534252f76231b4a6403c71866d4376e35292d/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-18_16:09:50_ef1534252f76231b4a6403c71866d4376e35292d/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-18_16:09:50_ef1534252f76231b4a6403c71866d4376e35292d/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_16:09:50_ef1534252f76231b4a6403c71866d4376e35292d/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-18_16:09:50_ef1534252f76231b4a6403c71866d4376e35292d/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-18_16:09:50_ef1534252f76231b4a6403c71866d4376e35292d/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 819b1876e26fdd758df23050ecd02bd8d430c90c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_16:09:50_ef1534252f76231b4a6403c71866d4376e35292d/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_16:09:50_ef1534252f76231b4a6403c71866d4376e35292d/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-18_16:09:50_ef1534252f76231b4a6403c71866d4376e35292d/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 50f6151f43b83db76cef9f78e6a4bbf4f0572c94..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_16:09:50_ef1534252f76231b4a6403c71866d4376e35292d/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.19737599999996,0.00357,1120.0 diff --git a/raw_results/2023-08-18_16:09:50_ef1534252f76231b4a6403c71866d4376e35292d/pytorch_bert_inference/1/main.log b/raw_results/2023-08-18_16:09:50_ef1534252f76231b4a6403c71866d4376e35292d/pytorch_bert_inference/1/main.log deleted file mode 100644 index a97b93ff98751c2f2a9b1b72aafe503ba0c21797..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_16:09:50_ef1534252f76231b4a6403c71866d4376e35292d/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-18 16:49:57,823][benchmark][INFO] - Configuring inference benchmark -[2023-08-18 16:49:57,825][benchmark][INFO] - + Setting seed(42) -[2023-08-18 16:49:58,260][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-18 16:49:58,260][backend][INFO] - Configuring pytorch backend -[2023-08-18 16:49:58,261][backend][INFO] - + Checking initial device isolation -[2023-08-18 16:49:58,261][backend][INFO] - + Checking contineous device isolation -[2023-08-18 16:49:58,261][pytorch][INFO] - + Disabling gradients -[2023-08-18 16:49:58,261][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-18 16:49:58,372][pytorch][INFO] - + Turning on eval mode -[2023-08-18 16:49:58,373][inference][INFO] - Running inference benchmark -[2023-08-18 16:49:58,492][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-18 16:49:58,494][inference][INFO] - + Tracking forward 
pass peak memory -[2023-08-18 16:49:58,535][inference][INFO] - + Forward pass peak memory: 468.19737599999996 (MB) -[2023-08-18 16:49:58,536][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-18 16:49:58,538][inference][INFO] - + Warming up the forward pass -[2023-08-18 16:49:58,575][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-18 16:50:03,619][inference][INFO] - + Forward pass latency: 3.57e-03 (s) -[2023-08-18 16:50:03,620][inference][INFO] - + Forward pass throughput: 1120.00 (samples/s) -[2023-08-18 16:50:03,620][inference][INFO] - Saving inference results -[2023-08-18 16:50:03,628][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-18_16:09:50_ef1534252f76231b4a6403c71866d4376e35292d/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-18_16:09:50_ef1534252f76231b4a6403c71866d4376e35292d/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 39258ef78f16622f3cbf970c22d0b92229e7cc16..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_16:09:50_ef1534252f76231b4a6403c71866d4376e35292d/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_16:09:50_ef1534252f76231b4a6403c71866d4376e35292d/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-18_16:09:50_ef1534252f76231b4a6403c71866d4376e35292d/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 42952c792552f1ab474aac25f61c82eca1b390fd..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_16:09:50_ef1534252f76231b4a6403c71866d4376e35292d/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - 
launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-18_16:09:50_ef1534252f76231b4a6403c71866d4376e35292d/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-18_16:09:50_ef1534252f76231b4a6403c71866d4376e35292d/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-18_16:09:50_ef1534252f76231b4a6403c71866d4376e35292d/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_16:09:50_ef1534252f76231b4a6403c71866d4376e35292d/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-18_16:09:50_ef1534252f76231b4a6403c71866d4376e35292d/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-18_16:09:50_ef1534252f76231b4a6403c71866d4376e35292d/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index cb6bbe7e2787ba735853eaeafaa72ec86761d726..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_16:09:50_ef1534252f76231b4a6403c71866d4376e35292d/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_16:09:50_ef1534252f76231b4a6403c71866d4376e35292d/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-18_16:09:50_ef1534252f76231b4a6403c71866d4376e35292d/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index bceb0744aa1cccbe3667a440ff492df974e0967d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_16:09:50_ef1534252f76231b4a6403c71866d4376e35292d/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,468.783104,0.0038,263.0,0.532,188.0 diff --git a/raw_results/2023-08-18_16:09:50_ef1534252f76231b4a6403c71866d4376e35292d/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-18_16:09:50_ef1534252f76231b4a6403c71866d4376e35292d/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index a10c091e9a976b1cf96049e4d544f2c3a7531cd3..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-18_16:09:50_ef1534252f76231b4a6403c71866d4376e35292d/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-18 16:50:08,322][benchmark][INFO] - Configuring inference benchmark -[2023-08-18 16:50:08,323][benchmark][INFO] - + Setting seed(42) -[2023-08-18 16:50:09,708][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-18 16:50:09,708][backend][INFO] - Configuring pytorch backend -[2023-08-18 16:50:09,708][backend][INFO] - + Checking initial device isolation -[2023-08-18 16:50:09,709][backend][INFO] - + Checking contineous device isolation -[2023-08-18 16:50:09,709][pytorch][INFO] - + Disabling gradients -[2023-08-18 16:50:09,709][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-18 16:50:10,346][pytorch][INFO] - + Turning on eval mode -[2023-08-18 16:50:10,347][inference][INFO] - Running inference benchmark -[2023-08-18 16:50:10,543][inference][INFO] - + Tracking forward pass peak memory -[2023-08-18 16:50:10,593][inference][INFO] - + Forward pass peak memory: 468.783104 (MB) -[2023-08-18 16:50:10,595][inference][INFO] - + Warming up the forward pass -[2023-08-18 16:50:10,628][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-18 16:50:15,673][inference][INFO] - + Forward pass latency: 3.80e-03 (s) -[2023-08-18 16:50:15,674][inference][INFO] - + Forward pass throughput: 263.00 (samples/s) -[2023-08-18 16:50:15,675][inference][INFO] - + Warming up the generation pass -[2023-08-18 16:50:16,164][inference][INFO] - + Tracking generation latency and throughput -[2023-08-18 16:50:21,487][inference][INFO] - + Generation pass latency: 5.32e-01 (s) -[2023-08-18 16:50:21,488][inference][INFO] - + Generation pass throughput: 188.00 (tokens/s) -[2023-08-18 16:50:21,488][inference][INFO] - Saving inference results -[2023-08-18 16:50:21,500][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-18_17:08:03_faed2ca46fb163082d154aa234fd5d30682d6bf1/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-18_17:08:03_faed2ca46fb163082d154aa234fd5d30682d6bf1/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index 8555d6744dd0e5881f14d8db6ba7f34fd0488b3b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_17:08:03_faed2ca46fb163082d154aa234fd5d30682d6bf1/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_17:08:03_faed2ca46fb163082d154aa234fd5d30682d6bf1/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-18_17:08:03_faed2ca46fb163082d154aa234fd5d30682d6bf1/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 948b946c2c4b8fdb76c13c905b3fe015565509d0..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_17:08:03_faed2ca46fb163082d154aa234fd5d30682d6bf1/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
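
Note that the `.config/config.yaml` files above store `disable_grad` and `eval_mode` as the interpolation `${is_inference:${benchmark.name}}`, while the corresponding `hydra_config.yaml` files record the already-resolved value `true`. A minimal sketch of how such a custom OmegaConf resolver could be registered — the actual definition ships with `optimum_benchmark` and may differ; the lambda below is an assumption:

```python
from omegaconf import OmegaConf

# Hypothetical resolver definition; the real one lives in
# optimum_benchmark and may differ in detail.
OmegaConf.register_new_resolver(
    "is_inference", lambda benchmark_name: benchmark_name == "inference"
)

cfg = OmegaConf.create(
    {
        "benchmark": {"name": "inference"},
        "backend": {"disable_grad": "${is_inference:${benchmark.name}}"},
    }
)
# Interpolations resolve on access, which is why config.yaml keeps the
# expression while hydra_config.yaml shows the concrete value.
print(cfg.backend.disable_grad)  # True, as recorded in hydra_config.yaml
```
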
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-18_17:08:03_faed2ca46fb163082d154aa234fd5d30682d6bf1/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-18_17:08:03_faed2ca46fb163082d154aa234fd5d30682d6bf1/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-18_17:08:03_faed2ca46fb163082d154aa234fd5d30682d6bf1/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_17:08:03_faed2ca46fb163082d154aa234fd5d30682d6bf1/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-18_17:08:03_faed2ca46fb163082d154aa234fd5d30682d6bf1/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-18_17:08:03_faed2ca46fb163082d154aa234fd5d30682d6bf1/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 8748d22fe3c2d5758d4d82aa74516bd5a24d74c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_17:08:03_faed2ca46fb163082d154aa234fd5d30682d6bf1/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_17:08:03_faed2ca46fb163082d154aa234fd5d30682d6bf1/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-18_17:08:03_faed2ca46fb163082d154aa234fd5d30682d6bf1/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 09d1552ad1466e433f13b572e35f09b9881bf375..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_17:08:03_faed2ca46fb163082d154aa234fd5d30682d6bf1/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.980864,0.00358,279.0 diff --git a/raw_results/2023-08-18_17:08:03_faed2ca46fb163082d154aa234fd5d30682d6bf1/pytorch_bert_inference/0/main.log b/raw_results/2023-08-18_17:08:03_faed2ca46fb163082d154aa234fd5d30682d6bf1/pytorch_bert_inference/0/main.log deleted file mode 100644 index 6cbdebc1725577d6fb5735fefb1f069818997352..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_17:08:03_faed2ca46fb163082d154aa234fd5d30682d6bf1/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-18 18:49:50,941][benchmark][INFO] - Configuring inference benchmark -[2023-08-18 18:49:50,942][benchmark][INFO] - + Setting seed(42) -[2023-08-18 18:49:52,151][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-18 18:49:52,151][backend][INFO] - Configuring pytorch backend -[2023-08-18 18:49:52,152][backend][INFO] - + Checking initial device isolation -[2023-08-18 18:49:52,152][backend][INFO] - + Checking contineous device isolation -[2023-08-18 18:49:52,152][pytorch][INFO] - + Disabling gradients -[2023-08-18 18:49:52,152][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-18 18:49:53,169][pytorch][INFO] - + Turning on eval mode -[2023-08-18 18:49:53,170][inference][INFO] - Running inference benchmark -[2023-08-18 18:49:53,290][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-18 18:49:53,292][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-18 18:49:53,349][inference][INFO] - + Forward pass peak memory: 466.980864 (MB) -[2023-08-18 18:49:53,351][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-18 18:49:53,352][inference][INFO] - + Warming up the forward pass -[2023-08-18 18:49:53,384][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-18 18:49:58,433][inference][INFO] - + Forward pass latency: 3.58e-03 (s) -[2023-08-18 18:49:58,434][inference][INFO] - + Forward pass throughput: 279.00 (samples/s) -[2023-08-18 18:49:58,435][inference][INFO] - Saving inference results -[2023-08-18 18:49:58,446][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-18_17:08:03_faed2ca46fb163082d154aa234fd5d30682d6bf1/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-18_17:08:03_faed2ca46fb163082d154aa234fd5d30682d6bf1/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index fc02352dc97d220d55fe04bc4edef4200110f22a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_17:08:03_faed2ca46fb163082d154aa234fd5d30682d6bf1/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_17:08:03_faed2ca46fb163082d154aa234fd5d30682d6bf1/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-18_17:08:03_faed2ca46fb163082d154aa234fd5d30682d6bf1/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 04e1990a94882b7b811e8bb6ca988ff6ff9b18bb..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_17:08:03_faed2ca46fb163082d154aa234fd5d30682d6bf1/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-18_17:08:03_faed2ca46fb163082d154aa234fd5d30682d6bf1/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-18_17:08:03_faed2ca46fb163082d154aa234fd5d30682d6bf1/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-08-18_17:08:03_faed2ca46fb163082d154aa234fd5d30682d6bf1/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_17:08:03_faed2ca46fb163082d154aa234fd5d30682d6bf1/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-18_17:08:03_faed2ca46fb163082d154aa234fd5d30682d6bf1/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-18_17:08:03_faed2ca46fb163082d154aa234fd5d30682d6bf1/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 819b1876e26fdd758df23050ecd02bd8d430c90c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_17:08:03_faed2ca46fb163082d154aa234fd5d30682d6bf1/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_17:08:03_faed2ca46fb163082d154aa234fd5d30682d6bf1/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-18_17:08:03_faed2ca46fb163082d154aa234fd5d30682d6bf1/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 92d3e8e782339e9629c3609fe0bffc51f1d48549..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_17:08:03_faed2ca46fb163082d154aa234fd5d30682d6bf1/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.96390399999996,0.00409,978.0 diff --git a/raw_results/2023-08-18_17:08:03_faed2ca46fb163082d154aa234fd5d30682d6bf1/pytorch_bert_inference/1/main.log b/raw_results/2023-08-18_17:08:03_faed2ca46fb163082d154aa234fd5d30682d6bf1/pytorch_bert_inference/1/main.log deleted file mode 100644 index 115e5fc06d12905546dd07dda404465baea0f307..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-18_17:08:03_faed2ca46fb163082d154aa234fd5d30682d6bf1/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-18 18:49:58,815][benchmark][INFO] - Configuring inference benchmark -[2023-08-18 18:49:58,816][benchmark][INFO] - + Setting seed(42) -[2023-08-18 18:49:59,283][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-18 18:49:59,284][backend][INFO] - Configuring pytorch backend -[2023-08-18 18:49:59,284][backend][INFO] - + Checking initial device isolation -[2023-08-18 18:49:59,284][backend][INFO] - + Checking contineous device isolation -[2023-08-18 18:49:59,284][pytorch][INFO] - + Disabling gradients -[2023-08-18 18:49:59,285][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-18 18:49:59,407][pytorch][INFO] - + Turning on eval mode -[2023-08-18 18:49:59,407][inference][INFO] - Running inference benchmark -[2023-08-18 18:49:59,533][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-18 18:49:59,534][inference][INFO] - + Tracking forward pass peak memory -[2023-08-18 18:49:59,576][inference][INFO] - + Forward pass peak memory: 467.96390399999996 (MB) -[2023-08-18 18:49:59,577][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-18 18:49:59,578][inference][INFO] - + Warming up the forward pass -[2023-08-18 18:49:59,625][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-18 18:50:04,668][inference][INFO] - + Forward pass latency: 4.09e-03 (s) -[2023-08-18 18:50:04,669][inference][INFO] - + Forward pass throughput: 978.00 (samples/s) -[2023-08-18 18:50:04,669][inference][INFO] - Saving inference results -[2023-08-18 18:50:04,677][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-18_17:08:03_faed2ca46fb163082d154aa234fd5d30682d6bf1/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-18_17:08:03_faed2ca46fb163082d154aa234fd5d30682d6bf1/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 39258ef78f16622f3cbf970c22d0b92229e7cc16..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_17:08:03_faed2ca46fb163082d154aa234fd5d30682d6bf1/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference 
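
The numbers in these `inference_results.csv` files and logs are internally consistent: forward throughput appears to be `batch_size / forward latency`, and generation throughput appears to be `new_tokens / generation latency` (with `batch_size: 1` and `new_tokens: 100` as configured above). A quick arithmetic check, reconstructed from the reported values rather than taken from the benchmark's actual code:

```python
# Assumed relationships, inferred from the CSVs above.
def forward_throughput(batch_size: int, latency_s: float) -> float:
    """Samples per second for one forward pass."""
    return batch_size / latency_s

def generate_throughput(new_tokens: int, latency_s: float) -> float:
    """Tokens per second for one generation pass (batch_size == 1 here)."""
    return new_tokens / latency_s

assert round(forward_throughput(4, 0.00409)) == 978   # bert, batch_size=4
assert round(forward_throughput(1, 0.0038)) == 263    # gpt2, batch_size=1
assert round(generate_throughput(100, 0.532)) == 188  # gpt2, new_tokens=100
```
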
-model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_17:08:03_faed2ca46fb163082d154aa234fd5d30682d6bf1/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-18_17:08:03_faed2ca46fb163082d154aa234fd5d30682d6bf1/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index ea01a4dc10bd3afeb2acc7b992169393d27f7b0e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_17:08:03_faed2ca46fb163082d154aa234fd5d30682d6bf1/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-18_17:08:03_faed2ca46fb163082d154aa234fd5d30682d6bf1/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-18_17:08:03_faed2ca46fb163082d154aa234fd5d30682d6bf1/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-18_17:08:03_faed2ca46fb163082d154aa234fd5d30682d6bf1/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_17:08:03_faed2ca46fb163082d154aa234fd5d30682d6bf1/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-18_17:08:03_faed2ca46fb163082d154aa234fd5d30682d6bf1/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-18_17:08:03_faed2ca46fb163082d154aa234fd5d30682d6bf1/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index cb6bbe7e2787ba735853eaeafaa72ec86761d726..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_17:08:03_faed2ca46fb163082d154aa234fd5d30682d6bf1/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_17:08:03_faed2ca46fb163082d154aa234fd5d30682d6bf1/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-18_17:08:03_faed2ca46fb163082d154aa234fd5d30682d6bf1/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index ca58bb29b71feab531d7f77008af3f13121b74df..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_17:08:03_faed2ca46fb163082d154aa234fd5d30682d6bf1/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.311488,0.00379,264.0,0.563,178.0 diff --git a/raw_results/2023-08-18_17:08:03_faed2ca46fb163082d154aa234fd5d30682d6bf1/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-18_17:08:03_faed2ca46fb163082d154aa234fd5d30682d6bf1/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 2f2d579b0b51b524e9db1e0ef674765e2a1b85b5..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_17:08:03_faed2ca46fb163082d154aa234fd5d30682d6bf1/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-18 18:50:09,676][benchmark][INFO] - Configuring inference benchmark -[2023-08-18 18:50:09,677][benchmark][INFO] - + Setting seed(42) -[2023-08-18 18:50:11,171][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-18 18:50:11,171][backend][INFO] - Configuring pytorch backend -[2023-08-18 18:50:11,172][backend][INFO] - + Checking initial device isolation -[2023-08-18 18:50:11,172][backend][INFO] - + Checking contineous device isolation -[2023-08-18 18:50:11,172][pytorch][INFO] - + Disabling gradients -[2023-08-18 18:50:11,172][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-18 18:50:11,828][pytorch][INFO] - + Turning on eval mode -[2023-08-18 18:50:11,829][inference][INFO] - Running inference benchmark -[2023-08-18 18:50:12,025][inference][INFO] - + Tracking forward pass peak memory -[2023-08-18 18:50:12,073][inference][INFO] - + Forward pass peak memory: 469.311488 (MB) -[2023-08-18 18:50:12,075][inference][INFO] - + Warming up the forward pass 
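
Each run writes a two-row `inference_results.csv` like the one above: an unnamed index column followed by the tracked metrics, with units embedded in the column names. A minimal loading sketch, assuming pandas is available; the path is one of the files deleted in this diff:

```python
import pandas as pd

# Load one per-run result file; the first, unnamed column is the row index.
df = pd.read_csv(
    "raw_results/2023-08-18_17:08:03_faed2ca46fb163082d154aa234fd5d30682d6bf1"
    "/pytorch_gpt2_inference/0/inference_results.csv",
    index_col=0,
)
print(df["forward.latency(s)"].iloc[0])             # 0.00379
print(df["generate.throughput(tokens/s)"].iloc[0])  # 178.0
```
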
-[2023-08-18 18:50:12,107][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-18 18:50:17,151][inference][INFO] - + Forward pass latency: 3.79e-03 (s) -[2023-08-18 18:50:17,153][inference][INFO] - + Forward pass throughput: 264.00 (samples/s) -[2023-08-18 18:50:17,153][inference][INFO] - + Warming up the generation pass -[2023-08-18 18:50:17,741][inference][INFO] - + Tracking generation latency and throughput -[2023-08-18 18:50:22,810][inference][INFO] - + Generation pass latency: 5.63e-01 (s) -[2023-08-18 18:50:22,811][inference][INFO] - + Generation pass throughput: 178.00 (tokens/s) -[2023-08-18 18:50:22,811][inference][INFO] - Saving inference results -[2023-08-18 18:50:22,823][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-18_20:01:35_6f4424bb086d3d090855862be5aff64eb8ed7101/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-18_20:01:35_6f4424bb086d3d090855862be5aff64eb8ed7101/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index 8555d6744dd0e5881f14d8db6ba7f34fd0488b3b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_20:01:35_6f4424bb086d3d090855862be5aff64eb8ed7101/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_20:01:35_6f4424bb086d3d090855862be5aff64eb8ed7101/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-18_20:01:35_6f4424bb086d3d090855862be5aff64eb8ed7101/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index ee362848e7d1c41e74bd39e51f4e4ac6b7ce5848..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_20:01:35_6f4424bb086d3d090855862be5aff64eb8ed7101/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-18_20:01:35_6f4424bb086d3d090855862be5aff64eb8ed7101/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-18_20:01:35_6f4424bb086d3d090855862be5aff64eb8ed7101/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-18_20:01:35_6f4424bb086d3d090855862be5aff64eb8ed7101/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_20:01:35_6f4424bb086d3d090855862be5aff64eb8ed7101/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-18_20:01:35_6f4424bb086d3d090855862be5aff64eb8ed7101/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-18_20:01:35_6f4424bb086d3d090855862be5aff64eb8ed7101/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 8748d22fe3c2d5758d4d82aa74516bd5a24d74c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_20:01:35_6f4424bb086d3d090855862be5aff64eb8ed7101/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_20:01:35_6f4424bb086d3d090855862be5aff64eb8ed7101/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-18_20:01:35_6f4424bb086d3d090855862be5aff64eb8ed7101/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 589ac96615ecdfab61471d1110070066fe11058a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_20:01:35_6f4424bb086d3d090855862be5aff64eb8ed7101/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.06687999999997,0.00361,277.0 diff --git a/raw_results/2023-08-18_20:01:35_6f4424bb086d3d090855862be5aff64eb8ed7101/pytorch_bert_inference/0/main.log b/raw_results/2023-08-18_20:01:35_6f4424bb086d3d090855862be5aff64eb8ed7101/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
f076210c896417bc6b1d58af13c93259750dcc0b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_20:01:35_6f4424bb086d3d090855862be5aff64eb8ed7101/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-18 20:49:49,630][benchmark][INFO] - Configuring inference benchmark -[2023-08-18 20:49:49,632][benchmark][INFO] - + Setting seed(42) -[2023-08-18 20:49:50,909][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-18 20:49:50,909][backend][INFO] - Configuring pytorch backend -[2023-08-18 20:49:50,909][backend][INFO] - + Checking initial device isolation -[2023-08-18 20:49:50,909][backend][INFO] - + Checking contineous device isolation -[2023-08-18 20:49:50,910][pytorch][INFO] - + Disabling gradients -[2023-08-18 20:49:50,910][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-18 20:49:51,530][pytorch][INFO] - + Turning on eval mode -[2023-08-18 20:49:51,531][inference][INFO] - Running inference benchmark -[2023-08-18 20:49:51,655][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-18 20:49:51,656][inference][INFO] - + Tracking forward pass peak memory -[2023-08-18 20:49:51,714][inference][INFO] - + Forward pass peak memory: 467.06687999999997 (MB) -[2023-08-18 20:49:51,715][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-18 20:49:51,716][inference][INFO] - + Warming up the forward pass -[2023-08-18 20:49:51,749][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-18 20:49:56,795][inference][INFO] - + Forward pass latency: 3.61e-03 (s) -[2023-08-18 20:49:56,796][inference][INFO] - + Forward pass throughput: 277.00 (samples/s) -[2023-08-18 20:49:56,796][inference][INFO] - Saving inference results -[2023-08-18 20:49:56,808][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-18_20:01:35_6f4424bb086d3d090855862be5aff64eb8ed7101/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-18_20:01:35_6f4424bb086d3d090855862be5aff64eb8ed7101/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index fc02352dc97d220d55fe04bc4edef4200110f22a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_20:01:35_6f4424bb086d3d090855862be5aff64eb8ed7101/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_20:01:35_6f4424bb086d3d090855862be5aff64eb8ed7101/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-18_20:01:35_6f4424bb086d3d090855862be5aff64eb8ed7101/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 4869c1ca82ab0ac7c9a5a3c329c9c0d9e516368b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_20:01:35_6f4424bb086d3d090855862be5aff64eb8ed7101/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
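
The `hydra.yaml` files above also explain the numbered subdirectories: the BasicSweeper expands `benchmark.input_shapes.batch_size: 1,4` into jobs 0 and 1, whose `overrides.yaml` files record `batch_size=1` and `batch_size=4` respectively, while the sweep directory itself is keyed by the `COMMIT_DATE_GMT` and `COMMIT_SHA` environment variables, matching the `raw_results` folder names. An illustrative re-derivation of that job numbering — Hydra does this internally, and this is not its actual implementation:

```python
from itertools import product

# The swept parameter from the sweeper section of hydra.yaml above.
sweep = {"benchmark.input_shapes.batch_size": [1, 4]}

# Cartesian product over all swept parameters -> one numbered job each.
for job_num, values in enumerate(product(*sweep.values())):
    overrides = [f"{key}={value}" for key, value in zip(sweep, values)]
    print(job_num, overrides)
# 0 ['benchmark.input_shapes.batch_size=1']
# 1 ['benchmark.input_shapes.batch_size=4']
```
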
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-18_20:01:35_6f4424bb086d3d090855862be5aff64eb8ed7101/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-18_20:01:35_6f4424bb086d3d090855862be5aff64eb8ed7101/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-18_20:01:35_6f4424bb086d3d090855862be5aff64eb8ed7101/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_20:01:35_6f4424bb086d3d090855862be5aff64eb8ed7101/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-18_20:01:35_6f4424bb086d3d090855862be5aff64eb8ed7101/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-18_20:01:35_6f4424bb086d3d090855862be5aff64eb8ed7101/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 819b1876e26fdd758df23050ecd02bd8d430c90c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_20:01:35_6f4424bb086d3d090855862be5aff64eb8ed7101/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_20:01:35_6f4424bb086d3d090855862be5aff64eb8ed7101/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-18_20:01:35_6f4424bb086d3d090855862be5aff64eb8ed7101/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 551dc00845ecb438fbd72e8007fe44ac9bf58399..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_20:01:35_6f4424bb086d3d090855862be5aff64eb8ed7101/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.103168,0.00422,948.0 diff --git a/raw_results/2023-08-18_20:01:35_6f4424bb086d3d090855862be5aff64eb8ed7101/pytorch_bert_inference/1/main.log b/raw_results/2023-08-18_20:01:35_6f4424bb086d3d090855862be5aff64eb8ed7101/pytorch_bert_inference/1/main.log deleted file mode 100644 index d196bed7735fd9824839b8b7ce215f3a7197c99e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_20:01:35_6f4424bb086d3d090855862be5aff64eb8ed7101/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-18 20:49:57,177][benchmark][INFO] - Configuring inference benchmark -[2023-08-18 20:49:57,178][benchmark][INFO] - + Setting seed(42) -[2023-08-18 20:49:57,628][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-18 20:49:57,629][backend][INFO] - Configuring pytorch backend -[2023-08-18 20:49:57,629][backend][INFO] - + Checking initial device isolation -[2023-08-18 20:49:57,629][backend][INFO] - + Checking contineous device isolation -[2023-08-18 20:49:57,629][pytorch][INFO] - + Disabling gradients -[2023-08-18 20:49:57,630][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-18 20:49:57,742][pytorch][INFO] - + Turning on eval mode -[2023-08-18 20:49:57,742][inference][INFO] - Running inference benchmark -[2023-08-18 20:49:57,870][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-18 20:49:57,871][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-18 20:49:57,915][inference][INFO] - + Forward pass peak memory: 468.103168 (MB) -[2023-08-18 20:49:57,916][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-18 20:49:57,918][inference][INFO] - + Warming up the forward pass -[2023-08-18 20:49:57,962][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-18 20:50:03,001][inference][INFO] - + Forward pass latency: 4.22e-03 (s) -[2023-08-18 20:50:03,002][inference][INFO] - + Forward pass throughput: 948.00 (samples/s) -[2023-08-18 20:50:03,002][inference][INFO] - Saving inference results -[2023-08-18 20:50:03,010][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-18_20:01:35_6f4424bb086d3d090855862be5aff64eb8ed7101/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-18_20:01:35_6f4424bb086d3d090855862be5aff64eb8ed7101/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 39258ef78f16622f3cbf970c22d0b92229e7cc16..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_20:01:35_6f4424bb086d3d090855862be5aff64eb8ed7101/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_20:01:35_6f4424bb086d3d090855862be5aff64eb8ed7101/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-18_20:01:35_6f4424bb086d3d090855862be5aff64eb8ed7101/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 9dd336a4aa200ca3c87cd8edd7df5d3b2daacc8f..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_20:01:35_6f4424bb086d3d090855862be5aff64eb8ed7101/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-18_20:01:35_6f4424bb086d3d090855862be5aff64eb8ed7101/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-18_20:01:35_6f4424bb086d3d090855862be5aff64eb8ed7101/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-18_20:01:35_6f4424bb086d3d090855862be5aff64eb8ed7101/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_20:01:35_6f4424bb086d3d090855862be5aff64eb8ed7101/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-18_20:01:35_6f4424bb086d3d090855862be5aff64eb8ed7101/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-18_20:01:35_6f4424bb086d3d090855862be5aff64eb8ed7101/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index cb6bbe7e2787ba735853eaeafaa72ec86761d726..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_20:01:35_6f4424bb086d3d090855862be5aff64eb8ed7101/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_20:01:35_6f4424bb086d3d090855862be5aff64eb8ed7101/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-18_20:01:35_6f4424bb086d3d090855862be5aff64eb8ed7101/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 77b4b7647abc748c3800f36bafe638d49c42cfae..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_20:01:35_6f4424bb086d3d090855862be5aff64eb8ed7101/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.26643199999995,0.00382,262.0,0.479,209.0 diff --git a/raw_results/2023-08-18_20:01:35_6f4424bb086d3d090855862be5aff64eb8ed7101/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-18_20:01:35_6f4424bb086d3d090855862be5aff64eb8ed7101/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 19f9bf5dc0c5a56d3a144a04d812c93992361bde..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-18_20:01:35_6f4424bb086d3d090855862be5aff64eb8ed7101/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-18 20:50:07,881][benchmark][INFO] - Configuring inference benchmark -[2023-08-18 20:50:07,882][benchmark][INFO] - + Setting seed(42) -[2023-08-18 20:50:09,262][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-18 20:50:09,262][backend][INFO] - Configuring pytorch backend -[2023-08-18 20:50:09,262][backend][INFO] - + Checking initial device isolation -[2023-08-18 20:50:09,263][backend][INFO] - + Checking contineous device isolation -[2023-08-18 20:50:09,263][pytorch][INFO] - + Disabling gradients -[2023-08-18 20:50:09,263][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-18 20:50:10,173][pytorch][INFO] - + Turning on eval mode -[2023-08-18 20:50:10,174][inference][INFO] - Running inference benchmark -[2023-08-18 20:50:10,367][inference][INFO] - + Tracking forward pass peak memory -[2023-08-18 20:50:10,418][inference][INFO] - + Forward pass peak memory: 469.26643199999995 (MB) -[2023-08-18 20:50:10,419][inference][INFO] - + Warming up the forward pass -[2023-08-18 20:50:10,452][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-18 20:50:15,496][inference][INFO] - + Forward pass latency: 3.82e-03 (s) -[2023-08-18 20:50:15,497][inference][INFO] - + Forward pass throughput: 262.00 (samples/s) -[2023-08-18 20:50:15,498][inference][INFO] - + Warming up the generation pass -[2023-08-18 20:50:16,081][inference][INFO] - + Tracking generation latency and throughput -[2023-08-18 20:50:21,350][inference][INFO] - + Generation pass latency: 4.79e-01 (s) -[2023-08-18 20:50:21,351][inference][INFO] - + Generation pass throughput: 209.00 (tokens/s) -[2023-08-18 20:50:21,351][inference][INFO] - Saving inference results -[2023-08-18 20:50:21,363][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-18_21:08:34_4d64157ed3795090110dd8aceb9b7a5ff78bb247/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-18_21:08:34_4d64157ed3795090110dd8aceb9b7a5ff78bb247/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index 8555d6744dd0e5881f14d8db6ba7f34fd0488b3b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_21:08:34_4d64157ed3795090110dd8aceb9b7a5ff78bb247/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_21:08:34_4d64157ed3795090110dd8aceb9b7a5ff78bb247/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-18_21:08:34_4d64157ed3795090110dd8aceb9b7a5ff78bb247/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 6d86763f8f0641f26f058261db0c6815949350c5..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_21:08:34_4d64157ed3795090110dd8aceb9b7a5ff78bb247/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-18_21:08:34_4d64157ed3795090110dd8aceb9b7a5ff78bb247/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-18_21:08:34_4d64157ed3795090110dd8aceb9b7a5ff78bb247/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-18_21:08:34_4d64157ed3795090110dd8aceb9b7a5ff78bb247/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_21:08:34_4d64157ed3795090110dd8aceb9b7a5ff78bb247/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-18_21:08:34_4d64157ed3795090110dd8aceb9b7a5ff78bb247/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-18_21:08:34_4d64157ed3795090110dd8aceb9b7a5ff78bb247/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 8748d22fe3c2d5758d4d82aa74516bd5a24d74c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_21:08:34_4d64157ed3795090110dd8aceb9b7a5ff78bb247/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_21:08:34_4d64157ed3795090110dd8aceb9b7a5ff78bb247/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-18_21:08:34_4d64157ed3795090110dd8aceb9b7a5ff78bb247/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 89b5616ed4dc13efe21c3f3aa4e50f08f7de8def..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_21:08:34_4d64157ed3795090110dd8aceb9b7a5ff78bb247/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.980864,0.00307,326.0 diff --git a/raw_results/2023-08-18_21:08:34_4d64157ed3795090110dd8aceb9b7a5ff78bb247/pytorch_bert_inference/0/main.log b/raw_results/2023-08-18_21:08:34_4d64157ed3795090110dd8aceb9b7a5ff78bb247/pytorch_bert_inference/0/main.log deleted file mode 100644 index a8259ff6e3c85b8b17ec890b1e126a195792255a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_21:08:34_4d64157ed3795090110dd8aceb9b7a5ff78bb247/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-18 22:49:49,345][benchmark][INFO] - Configuring inference benchmark -[2023-08-18 22:49:49,346][benchmark][INFO] - + Setting seed(42) -[2023-08-18 22:49:50,551][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-18 22:49:50,552][backend][INFO] - Configuring pytorch backend -[2023-08-18 22:49:50,552][backend][INFO] - + Checking initial device isolation -[2023-08-18 22:49:50,552][backend][INFO] - + Checking contineous device isolation -[2023-08-18 22:49:50,552][pytorch][INFO] - + Disabling gradients -[2023-08-18 22:49:50,552][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-18 22:49:51,161][pytorch][INFO] - + Turning on eval mode -[2023-08-18 22:49:51,161][inference][INFO] - Running inference benchmark -[2023-08-18 22:49:51,284][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-18 22:49:51,286][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-18 22:49:51,343][inference][INFO] - + Forward pass peak memory: 466.980864 (MB) -[2023-08-18 22:49:51,344][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-18 22:49:51,345][inference][INFO] - + Warming up the forward pass -[2023-08-18 22:49:51,377][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-18 22:49:56,429][inference][INFO] - + Forward pass latency: 3.07e-03 (s) -[2023-08-18 22:49:56,431][inference][INFO] - + Forward pass throughput: 326.00 (samples/s) -[2023-08-18 22:49:56,431][inference][INFO] - Saving inference results -[2023-08-18 22:49:56,441][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-18_21:08:34_4d64157ed3795090110dd8aceb9b7a5ff78bb247/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-18_21:08:34_4d64157ed3795090110dd8aceb9b7a5ff78bb247/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index fc02352dc97d220d55fe04bc4edef4200110f22a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_21:08:34_4d64157ed3795090110dd8aceb9b7a5ff78bb247/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_21:08:34_4d64157ed3795090110dd8aceb9b7a5ff78bb247/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-18_21:08:34_4d64157ed3795090110dd8aceb9b7a5ff78bb247/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index a68cf2299a843aa5a4ff04d42cfbc1dd04a4ed8b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_21:08:34_4d64157ed3795090110dd8aceb9b7a5ff78bb247/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-18_21:08:34_4d64157ed3795090110dd8aceb9b7a5ff78bb247/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-18_21:08:34_4d64157ed3795090110dd8aceb9b7a5ff78bb247/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-08-18_21:08:34_4d64157ed3795090110dd8aceb9b7a5ff78bb247/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_21:08:34_4d64157ed3795090110dd8aceb9b7a5ff78bb247/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-18_21:08:34_4d64157ed3795090110dd8aceb9b7a5ff78bb247/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-18_21:08:34_4d64157ed3795090110dd8aceb9b7a5ff78bb247/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 819b1876e26fdd758df23050ecd02bd8d430c90c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_21:08:34_4d64157ed3795090110dd8aceb9b7a5ff78bb247/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_21:08:34_4d64157ed3795090110dd8aceb9b7a5ff78bb247/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-18_21:08:34_4d64157ed3795090110dd8aceb9b7a5ff78bb247/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 2bcba2755d650a0e57b945c627f144a3deed7537..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_21:08:34_4d64157ed3795090110dd8aceb9b7a5ff78bb247/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.09087999999997,0.00346,1160.0 diff --git a/raw_results/2023-08-18_21:08:34_4d64157ed3795090110dd8aceb9b7a5ff78bb247/pytorch_bert_inference/1/main.log b/raw_results/2023-08-18_21:08:34_4d64157ed3795090110dd8aceb9b7a5ff78bb247/pytorch_bert_inference/1/main.log deleted file mode 100644 index 1e6968bf8eb68a474aae45e51cfddd7fc8ac38bd..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-18_21:08:34_4d64157ed3795090110dd8aceb9b7a5ff78bb247/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-18 22:49:56,809][benchmark][INFO] - Configuring inference benchmark -[2023-08-18 22:49:56,810][benchmark][INFO] - + Setting seed(42) -[2023-08-18 22:49:57,243][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-18 22:49:57,243][backend][INFO] - Configuring pytorch backend -[2023-08-18 22:49:57,244][backend][INFO] - + Checking initial device isolation -[2023-08-18 22:49:57,244][backend][INFO] - + Checking contineous device isolation -[2023-08-18 22:49:57,244][pytorch][INFO] - + Disabling gradients -[2023-08-18 22:49:57,244][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-18 22:49:57,361][pytorch][INFO] - + Turning on eval mode -[2023-08-18 22:49:57,362][inference][INFO] - Running inference benchmark -[2023-08-18 22:49:57,484][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-18 22:49:57,485][inference][INFO] - + Tracking forward pass peak memory -[2023-08-18 22:49:57,525][inference][INFO] - + Forward pass peak memory: 468.09087999999997 (MB) -[2023-08-18 22:49:57,526][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-18 22:49:57,528][inference][INFO] - + Warming up the forward pass -[2023-08-18 22:49:57,564][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-18 22:50:02,608][inference][INFO] - + Forward pass latency: 3.46e-03 (s) -[2023-08-18 22:50:02,610][inference][INFO] - + Forward pass throughput: 1160.00 (samples/s) -[2023-08-18 22:50:02,610][inference][INFO] - Saving inference results -[2023-08-18 22:50:02,617][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-18_21:08:34_4d64157ed3795090110dd8aceb9b7a5ff78bb247/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-18_21:08:34_4d64157ed3795090110dd8aceb9b7a5ff78bb247/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 39258ef78f16622f3cbf970c22d0b92229e7cc16..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_21:08:34_4d64157ed3795090110dd8aceb9b7a5ff78bb247/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference 
-model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_21:08:34_4d64157ed3795090110dd8aceb9b7a5ff78bb247/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-18_21:08:34_4d64157ed3795090110dd8aceb9b7a5ff78bb247/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 6b2879161dd5a03bb60f8257ed9733314d668681..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_21:08:34_4d64157ed3795090110dd8aceb9b7a5ff78bb247/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-18_21:08:34_4d64157ed3795090110dd8aceb9b7a5ff78bb247/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-18_21:08:34_4d64157ed3795090110dd8aceb9b7a5ff78bb247/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-18_21:08:34_4d64157ed3795090110dd8aceb9b7a5ff78bb247/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_21:08:34_4d64157ed3795090110dd8aceb9b7a5ff78bb247/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-18_21:08:34_4d64157ed3795090110dd8aceb9b7a5ff78bb247/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-18_21:08:34_4d64157ed3795090110dd8aceb9b7a5ff78bb247/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index cb6bbe7e2787ba735853eaeafaa72ec86761d726..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_21:08:34_4d64157ed3795090110dd8aceb9b7a5ff78bb247/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_21:08:34_4d64157ed3795090110dd8aceb9b7a5ff78bb247/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-18_21:08:34_4d64157ed3795090110dd8aceb9b7a5ff78bb247/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index b9a7f2cfd84c90f391bc2f3277291a960453fc96..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_21:08:34_4d64157ed3795090110dd8aceb9b7a5ff78bb247/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,468.75443199999995,0.00373,268.0,0.487,205.0 diff --git a/raw_results/2023-08-18_21:08:34_4d64157ed3795090110dd8aceb9b7a5ff78bb247/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-18_21:08:34_4d64157ed3795090110dd8aceb9b7a5ff78bb247/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 326b46f16d91f0b45361c215964bd6c813ab71be..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_21:08:34_4d64157ed3795090110dd8aceb9b7a5ff78bb247/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-18 22:50:07,342][benchmark][INFO] - Configuring inference benchmark -[2023-08-18 22:50:07,343][benchmark][INFO] - + Setting seed(42) -[2023-08-18 22:50:08,779][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-18 22:50:08,779][backend][INFO] - Configuring pytorch backend -[2023-08-18 22:50:08,779][backend][INFO] - + Checking initial device isolation -[2023-08-18 22:50:08,779][backend][INFO] - + Checking contineous device isolation -[2023-08-18 22:50:08,780][pytorch][INFO] - + Disabling gradients -[2023-08-18 22:50:08,780][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-18 22:50:09,414][pytorch][INFO] - + Turning on eval mode -[2023-08-18 22:50:09,415][inference][INFO] - Running inference benchmark -[2023-08-18 22:50:09,611][inference][INFO] - + Tracking forward pass peak memory -[2023-08-18 22:50:09,660][inference][INFO] - + Forward pass peak memory: 468.75443199999995 (MB) -[2023-08-18 22:50:09,662][inference][INFO] - + Warming up the 
forward pass -[2023-08-18 22:50:09,695][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-18 22:50:14,740][inference][INFO] - + Forward pass latency: 3.73e-03 (s) -[2023-08-18 22:50:14,741][inference][INFO] - + Forward pass throughput: 268.00 (samples/s) -[2023-08-18 22:50:14,742][inference][INFO] - + Warming up the generation pass -[2023-08-18 22:50:15,296][inference][INFO] - + Tracking generation latency and throughput -[2023-08-18 22:50:20,650][inference][INFO] - + Generation pass latency: 4.87e-01 (s) -[2023-08-18 22:50:20,651][inference][INFO] - + Generation pass throughput: 205.00 (tokens/s) -[2023-08-18 22:50:20,651][inference][INFO] - Saving inference results -[2023-08-18 22:50:20,663][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-18_21:12:28_6c811a322f06a4ce0f3dcf1d6be9e334ee69d8f7/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-18_21:12:28_6c811a322f06a4ce0f3dcf1d6be9e334ee69d8f7/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index 8555d6744dd0e5881f14d8db6ba7f34fd0488b3b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_21:12:28_6c811a322f06a4ce0f3dcf1d6be9e334ee69d8f7/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_21:12:28_6c811a322f06a4ce0f3dcf1d6be9e334ee69d8f7/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-18_21:12:28_6c811a322f06a4ce0f3dcf1d6be9e334ee69d8f7/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 5d330c9c31799bfd4dcd7b7128c55761c067b4cd..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_21:12:28_6c811a322f06a4ce0f3dcf1d6be9e334ee69d8f7/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-18_21:12:28_6c811a322f06a4ce0f3dcf1d6be9e334ee69d8f7/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-18_21:12:28_6c811a322f06a4ce0f3dcf1d6be9e334ee69d8f7/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-18_21:12:28_6c811a322f06a4ce0f3dcf1d6be9e334ee69d8f7/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_21:12:28_6c811a322f06a4ce0f3dcf1d6be9e334ee69d8f7/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-18_21:12:28_6c811a322f06a4ce0f3dcf1d6be9e334ee69d8f7/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-18_21:12:28_6c811a322f06a4ce0f3dcf1d6be9e334ee69d8f7/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 8748d22fe3c2d5758d4d82aa74516bd5a24d74c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_21:12:28_6c811a322f06a4ce0f3dcf1d6be9e334ee69d8f7/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_21:12:28_6c811a322f06a4ce0f3dcf1d6be9e334ee69d8f7/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-18_21:12:28_6c811a322f06a4ce0f3dcf1d6be9e334ee69d8f7/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 5f1b0c1cdbfe01b86df5a458d80a130390c2953c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_21:12:28_6c811a322f06a4ce0f3dcf1d6be9e334ee69d8f7/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.132416,0.00381,262.0 diff --git a/raw_results/2023-08-18_21:12:28_6c811a322f06a4ce0f3dcf1d6be9e334ee69d8f7/pytorch_bert_inference/0/main.log b/raw_results/2023-08-18_21:12:28_6c811a322f06a4ce0f3dcf1d6be9e334ee69d8f7/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
375a099bdde1653eb0cdb7d57d204bd1bb7a7c13..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_21:12:28_6c811a322f06a4ce0f3dcf1d6be9e334ee69d8f7/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-18 22:51:23,787][benchmark][INFO] - Configuring inference benchmark -[2023-08-18 22:51:23,788][benchmark][INFO] - + Setting seed(42) -[2023-08-18 22:51:24,995][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-18 22:51:24,995][backend][INFO] - Configuring pytorch backend -[2023-08-18 22:51:24,995][backend][INFO] - + Checking initial device isolation -[2023-08-18 22:51:24,995][backend][INFO] - + Checking contineous device isolation -[2023-08-18 22:51:24,995][pytorch][INFO] - + Disabling gradients -[2023-08-18 22:51:24,995][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-18 22:51:25,605][pytorch][INFO] - + Turning on eval mode -[2023-08-18 22:51:25,605][inference][INFO] - Running inference benchmark -[2023-08-18 22:51:25,724][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-18 22:51:25,725][inference][INFO] - + Tracking forward pass peak memory -[2023-08-18 22:51:25,783][inference][INFO] - + Forward pass peak memory: 467.132416 (MB) -[2023-08-18 22:51:25,784][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-18 22:51:25,786][inference][INFO] - + Warming up the forward pass -[2023-08-18 22:51:25,822][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-18 22:51:30,867][inference][INFO] - + Forward pass latency: 3.81e-03 (s) -[2023-08-18 22:51:30,868][inference][INFO] - + Forward pass throughput: 262.00 (samples/s) -[2023-08-18 22:51:30,868][inference][INFO] - Saving inference results -[2023-08-18 22:51:30,878][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-18_21:12:28_6c811a322f06a4ce0f3dcf1d6be9e334ee69d8f7/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-18_21:12:28_6c811a322f06a4ce0f3dcf1d6be9e334ee69d8f7/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index fc02352dc97d220d55fe04bc4edef4200110f22a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_21:12:28_6c811a322f06a4ce0f3dcf1d6be9e334ee69d8f7/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_21:12:28_6c811a322f06a4ce0f3dcf1d6be9e334ee69d8f7/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-18_21:12:28_6c811a322f06a4ce0f3dcf1d6be9e334ee69d8f7/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 589fe42466a08e5c822dac7a9948b053b0027518..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_21:12:28_6c811a322f06a4ce0f3dcf1d6be9e334ee69d8f7/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-18_21:12:28_6c811a322f06a4ce0f3dcf1d6be9e334ee69d8f7/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-18_21:12:28_6c811a322f06a4ce0f3dcf1d6be9e334ee69d8f7/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-18_21:12:28_6c811a322f06a4ce0f3dcf1d6be9e334ee69d8f7/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_21:12:28_6c811a322f06a4ce0f3dcf1d6be9e334ee69d8f7/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-18_21:12:28_6c811a322f06a4ce0f3dcf1d6be9e334ee69d8f7/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-18_21:12:28_6c811a322f06a4ce0f3dcf1d6be9e334ee69d8f7/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 819b1876e26fdd758df23050ecd02bd8d430c90c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_21:12:28_6c811a322f06a4ce0f3dcf1d6be9e334ee69d8f7/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_21:12:28_6c811a322f06a4ce0f3dcf1d6be9e334ee69d8f7/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-18_21:12:28_6c811a322f06a4ce0f3dcf1d6be9e334ee69d8f7/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 8db263f787c07f222c9405cc6aec7b5f364f1c41..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_21:12:28_6c811a322f06a4ce0f3dcf1d6be9e334ee69d8f7/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.14822399999997,0.00358,1120.0 diff --git a/raw_results/2023-08-18_21:12:28_6c811a322f06a4ce0f3dcf1d6be9e334ee69d8f7/pytorch_bert_inference/1/main.log b/raw_results/2023-08-18_21:12:28_6c811a322f06a4ce0f3dcf1d6be9e334ee69d8f7/pytorch_bert_inference/1/main.log deleted file mode 100644 index da8bda796d2dbd9c8db6c3cb50bba91f63e20fea..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_21:12:28_6c811a322f06a4ce0f3dcf1d6be9e334ee69d8f7/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-18 22:51:31,269][benchmark][INFO] - Configuring inference benchmark -[2023-08-18 22:51:31,271][benchmark][INFO] - + Setting seed(42) -[2023-08-18 22:51:31,736][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-18 22:51:31,737][backend][INFO] - Configuring pytorch backend -[2023-08-18 22:51:31,737][backend][INFO] - + Checking initial device isolation -[2023-08-18 22:51:31,737][backend][INFO] - + Checking contineous device isolation -[2023-08-18 22:51:31,737][pytorch][INFO] - + Disabling gradients -[2023-08-18 22:51:31,737][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-18 22:51:31,853][pytorch][INFO] - + Turning on eval mode -[2023-08-18 22:51:31,854][inference][INFO] - Running inference benchmark -[2023-08-18 22:51:31,976][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-18 22:51:31,978][inference][INFO] - + Tracking forward 
pass peak memory -[2023-08-18 22:51:32,022][inference][INFO] - + Forward pass peak memory: 468.14822399999997 (MB) -[2023-08-18 22:51:32,023][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-18 22:51:32,024][inference][INFO] - + Warming up the forward pass -[2023-08-18 22:51:32,062][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-18 22:51:37,105][inference][INFO] - + Forward pass latency: 3.58e-03 (s) -[2023-08-18 22:51:37,106][inference][INFO] - + Forward pass throughput: 1120.00 (samples/s) -[2023-08-18 22:51:37,106][inference][INFO] - Saving inference results -[2023-08-18 22:51:37,113][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-18_21:12:28_6c811a322f06a4ce0f3dcf1d6be9e334ee69d8f7/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-18_21:12:28_6c811a322f06a4ce0f3dcf1d6be9e334ee69d8f7/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 39258ef78f16622f3cbf970c22d0b92229e7cc16..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_21:12:28_6c811a322f06a4ce0f3dcf1d6be9e334ee69d8f7/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_21:12:28_6c811a322f06a4ce0f3dcf1d6be9e334ee69d8f7/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-18_21:12:28_6c811a322f06a4ce0f3dcf1d6be9e334ee69d8f7/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 6aa1aa04df1de62174b3695afb96a49b113e0973..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_21:12:28_6c811a322f06a4ce0f3dcf1d6be9e334ee69d8f7/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - 
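The forward-pass results recorded above are internally consistent if the reported throughput is batch_size divided by the mean latency, rounded to three significant figures (1/0.00381 s gives about 262 samples/s at batch 1; 4/0.00358 s gives about 1120 samples/s at batch 4). A minimal Python sketch under that assumption, inferred from the CSV values rather than taken from optimum-benchmark's source:

    from math import floor, log10

    def forward_throughput(batch_size: int, latency_s: float) -> float:
        """Samples/s under the assumed formula, rounded to 3 significant figures."""
        samples_per_s = batch_size / latency_s
        digits = 2 - floor(log10(abs(samples_per_s)))
        return round(samples_per_s, digits)

    assert forward_throughput(1, 0.00381) == 262.0   # pytorch_bert_inference/0
    assert forward_throughput(4, 0.00358) == 1120.0  # pytorch_bert_inference/1

The same reading reproduces the other forward rows in this tree (e.g. 1/0.0031 s rounds to 323 samples/s, 4/0.0034 s to 1180 samples/s).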
launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-18_21:12:28_6c811a322f06a4ce0f3dcf1d6be9e334ee69d8f7/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-18_21:12:28_6c811a322f06a4ce0f3dcf1d6be9e334ee69d8f7/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-18_21:12:28_6c811a322f06a4ce0f3dcf1d6be9e334ee69d8f7/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_21:12:28_6c811a322f06a4ce0f3dcf1d6be9e334ee69d8f7/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-18_21:12:28_6c811a322f06a4ce0f3dcf1d6be9e334ee69d8f7/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-18_21:12:28_6c811a322f06a4ce0f3dcf1d6be9e334ee69d8f7/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index cb6bbe7e2787ba735853eaeafaa72ec86761d726..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_21:12:28_6c811a322f06a4ce0f3dcf1d6be9e334ee69d8f7/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_21:12:28_6c811a322f06a4ce0f3dcf1d6be9e334ee69d8f7/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-18_21:12:28_6c811a322f06a4ce0f3dcf1d6be9e334ee69d8f7/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 31234e4b0fd19fe94d40919bef6b58ef73afe9bc..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_21:12:28_6c811a322f06a4ce0f3dcf1d6be9e334ee69d8f7/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.26643199999995,0.00382,262.0,0.526,190.0 diff --git a/raw_results/2023-08-18_21:12:28_6c811a322f06a4ce0f3dcf1d6be9e334ee69d8f7/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-18_21:12:28_6c811a322f06a4ce0f3dcf1d6be9e334ee69d8f7/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 7c42c0beffbea00d9538e329e5c94158adb69ee7..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-18_21:12:28_6c811a322f06a4ce0f3dcf1d6be9e334ee69d8f7/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-18 22:51:42,077][benchmark][INFO] - Configuring inference benchmark -[2023-08-18 22:51:42,078][benchmark][INFO] - + Setting seed(42) -[2023-08-18 22:51:43,450][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-18 22:51:43,450][backend][INFO] - Configuring pytorch backend -[2023-08-18 22:51:43,450][backend][INFO] - + Checking initial device isolation -[2023-08-18 22:51:43,450][backend][INFO] - + Checking contineous device isolation -[2023-08-18 22:51:43,451][pytorch][INFO] - + Disabling gradients -[2023-08-18 22:51:43,451][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-18 22:51:44,100][pytorch][INFO] - + Turning on eval mode -[2023-08-18 22:51:44,100][inference][INFO] - Running inference benchmark -[2023-08-18 22:51:44,300][inference][INFO] - + Tracking forward pass peak memory -[2023-08-18 22:51:44,352][inference][INFO] - + Forward pass peak memory: 469.26643199999995 (MB) -[2023-08-18 22:51:44,354][inference][INFO] - + Warming up the forward pass -[2023-08-18 22:51:44,391][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-18 22:51:49,434][inference][INFO] - + Forward pass latency: 3.82e-03 (s) -[2023-08-18 22:51:49,436][inference][INFO] - + Forward pass throughput: 262.00 (samples/s) -[2023-08-18 22:51:49,436][inference][INFO] - + Warming up the generation pass -[2023-08-18 22:51:49,928][inference][INFO] - + Tracking generation latency and throughput -[2023-08-18 22:51:55,186][inference][INFO] - + Generation pass latency: 5.26e-01 (s) -[2023-08-18 22:51:55,187][inference][INFO] - + Generation pass throughput: 190.00 (tokens/s) -[2023-08-18 22:51:55,187][inference][INFO] - Saving inference results -[2023-08-18 22:51:55,198][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-18_21:30:29_6b82d936d49956ba7b43c5ee590f4868de373b65/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-18_21:30:29_6b82d936d49956ba7b43c5ee590f4868de373b65/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index 8555d6744dd0e5881f14d8db6ba7f34fd0488b3b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_21:30:29_6b82d936d49956ba7b43c5ee590f4868de373b65/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_21:30:29_6b82d936d49956ba7b43c5ee590f4868de373b65/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-18_21:30:29_6b82d936d49956ba7b43c5ee590f4868de373b65/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index f24ab5eea96409b061eedbbc18d2a60d0cbc41f3..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_21:30:29_6b82d936d49956ba7b43c5ee590f4868de373b65/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-18_21:30:29_6b82d936d49956ba7b43c5ee590f4868de373b65/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-18_21:30:29_6b82d936d49956ba7b43c5ee590f4868de373b65/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-18_21:30:29_6b82d936d49956ba7b43c5ee590f4868de373b65/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_21:30:29_6b82d936d49956ba7b43c5ee590f4868de373b65/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-18_21:30:29_6b82d936d49956ba7b43c5ee590f4868de373b65/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-18_21:30:29_6b82d936d49956ba7b43c5ee590f4868de373b65/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 8748d22fe3c2d5758d4d82aa74516bd5a24d74c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_21:30:29_6b82d936d49956ba7b43c5ee590f4868de373b65/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_21:30:29_6b82d936d49956ba7b43c5ee590f4868de373b65/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-18_21:30:29_6b82d936d49956ba7b43c5ee590f4868de373b65/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 5df6cee0652aea71d663a20f8a088b9e9caba675..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_21:30:29_6b82d936d49956ba7b43c5ee590f4868de373b65/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.18976,0.0031,323.0 diff --git a/raw_results/2023-08-18_21:30:29_6b82d936d49956ba7b43c5ee590f4868de373b65/pytorch_bert_inference/0/main.log b/raw_results/2023-08-18_21:30:29_6b82d936d49956ba7b43c5ee590f4868de373b65/pytorch_bert_inference/0/main.log deleted file mode 100644 index 317514f8f7615c747ff2286dd5e17ca3a9c6d46f..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_21:30:29_6b82d936d49956ba7b43c5ee590f4868de373b65/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-18 22:52:58,250][benchmark][INFO] - Configuring inference benchmark -[2023-08-18 22:52:58,251][benchmark][INFO] - + Setting seed(42) -[2023-08-18 22:52:59,616][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-18 22:52:59,616][backend][INFO] - Configuring pytorch backend -[2023-08-18 22:52:59,616][backend][INFO] - + Checking initial device isolation -[2023-08-18 22:52:59,617][backend][INFO] - + Checking contineous device isolation -[2023-08-18 22:52:59,617][pytorch][INFO] - + Disabling gradients -[2023-08-18 22:52:59,617][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-18 22:53:00,244][pytorch][INFO] - + Turning on eval mode -[2023-08-18 22:53:00,245][inference][INFO] - Running inference benchmark -[2023-08-18 22:53:00,374][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-18 22:53:00,376][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-18 22:53:00,436][inference][INFO] - + Forward pass peak memory: 467.18976 (MB) -[2023-08-18 22:53:00,437][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-18 22:53:00,439][inference][INFO] - + Warming up the forward pass -[2023-08-18 22:53:00,480][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-18 22:53:05,535][inference][INFO] - + Forward pass latency: 3.10e-03 (s) -[2023-08-18 22:53:05,536][inference][INFO] - + Forward pass throughput: 323.00 (samples/s) -[2023-08-18 22:53:05,536][inference][INFO] - Saving inference results -[2023-08-18 22:53:05,548][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-18_21:30:29_6b82d936d49956ba7b43c5ee590f4868de373b65/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-18_21:30:29_6b82d936d49956ba7b43c5ee590f4868de373b65/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index fc02352dc97d220d55fe04bc4edef4200110f22a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_21:30:29_6b82d936d49956ba7b43c5ee590f4868de373b65/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_21:30:29_6b82d936d49956ba7b43c5ee590f4868de373b65/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-18_21:30:29_6b82d936d49956ba7b43c5ee590f4868de373b65/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index d4634a77506d4e56945c1160e04b81defe4030b6..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_21:30:29_6b82d936d49956ba7b43c5ee590f4868de373b65/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
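Comparing the same benchmark across the two commits recorded here, the bert batch-1 forward latency moves from 3.81e-03 s at commit 6c811a3 to 3.10e-03 s at commit 6b82d93. A hypothetical helper (not part of the original tooling) for the signed percent change a regression dashboard might report:

    def pct_change(base: float, current: float) -> float:
        """Signed percent change of `current` relative to `base`."""
        return (current - base) / base * 100.0

    # bert, batch_size=1, forward latency: 6c811a3 -> 6b82d93
    print(f"{pct_change(0.00381, 0.0031):+.1f}%")  # -18.6%, i.e. faster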
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-18_21:30:29_6b82d936d49956ba7b43c5ee590f4868de373b65/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-18_21:30:29_6b82d936d49956ba7b43c5ee590f4868de373b65/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-08-18_21:30:29_6b82d936d49956ba7b43c5ee590f4868de373b65/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_21:30:29_6b82d936d49956ba7b43c5ee590f4868de373b65/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-18_21:30:29_6b82d936d49956ba7b43c5ee590f4868de373b65/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-18_21:30:29_6b82d936d49956ba7b43c5ee590f4868de373b65/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 819b1876e26fdd758df23050ecd02bd8d430c90c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_21:30:29_6b82d936d49956ba7b43c5ee590f4868de373b65/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_21:30:29_6b82d936d49956ba7b43c5ee590f4868de373b65/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-18_21:30:29_6b82d936d49956ba7b43c5ee590f4868de373b65/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 11698da6b6b90110ee9634adecf333afca454455..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_21:30:29_6b82d936d49956ba7b43c5ee590f4868de373b65/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.29977599999995,0.0034,1180.0 diff --git a/raw_results/2023-08-18_21:30:29_6b82d936d49956ba7b43c5ee590f4868de373b65/pytorch_bert_inference/1/main.log b/raw_results/2023-08-18_21:30:29_6b82d936d49956ba7b43c5ee590f4868de373b65/pytorch_bert_inference/1/main.log deleted file mode 100644 index d7dad7215e888d953a2d241358a9f3db259a58da..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-18_21:30:29_6b82d936d49956ba7b43c5ee590f4868de373b65/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-18 22:53:05,930][benchmark][INFO] - Configuring inference benchmark -[2023-08-18 22:53:05,931][benchmark][INFO] - + Setting seed(42) -[2023-08-18 22:53:06,367][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-18 22:53:06,368][backend][INFO] - Configuring pytorch backend -[2023-08-18 22:53:06,368][backend][INFO] - + Checking initial device isolation -[2023-08-18 22:53:06,368][backend][INFO] - + Checking contineous device isolation -[2023-08-18 22:53:06,368][pytorch][INFO] - + Disabling gradients -[2023-08-18 22:53:06,368][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-18 22:53:06,509][pytorch][INFO] - + Turning on eval mode -[2023-08-18 22:53:06,509][inference][INFO] - Running inference benchmark -[2023-08-18 22:53:06,631][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-18 22:53:06,632][inference][INFO] - + Tracking forward pass peak memory -[2023-08-18 22:53:06,675][inference][INFO] - + Forward pass peak memory: 468.29977599999995 (MB) -[2023-08-18 22:53:06,676][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-18 22:53:06,678][inference][INFO] - + Warming up the forward pass -[2023-08-18 22:53:06,713][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-18 22:53:11,759][inference][INFO] - + Forward pass latency: 3.40e-03 (s) -[2023-08-18 22:53:11,760][inference][INFO] - + Forward pass throughput: 1180.00 (samples/s) -[2023-08-18 22:53:11,760][inference][INFO] - Saving inference results -[2023-08-18 22:53:11,769][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-18_21:30:29_6b82d936d49956ba7b43c5ee590f4868de373b65/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-18_21:30:29_6b82d936d49956ba7b43c5ee590f4868de373b65/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 39258ef78f16622f3cbf970c22d0b92229e7cc16..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_21:30:29_6b82d936d49956ba7b43c5ee590f4868de373b65/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference 
-model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_21:30:29_6b82d936d49956ba7b43c5ee590f4868de373b65/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-18_21:30:29_6b82d936d49956ba7b43c5ee590f4868de373b65/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index a3d59c74e6e57909305bf60f9c2c06011729eab3..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_21:30:29_6b82d936d49956ba7b43c5ee590f4868de373b65/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-18_21:30:29_6b82d936d49956ba7b43c5ee590f4868de373b65/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-18_21:30:29_6b82d936d49956ba7b43c5ee590f4868de373b65/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-18_21:30:29_6b82d936d49956ba7b43c5ee590f4868de373b65/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_21:30:29_6b82d936d49956ba7b43c5ee590f4868de373b65/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-18_21:30:29_6b82d936d49956ba7b43c5ee590f4868de373b65/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-18_21:30:29_6b82d936d49956ba7b43c5ee590f4868de373b65/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index cb6bbe7e2787ba735853eaeafaa72ec86761d726..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_21:30:29_6b82d936d49956ba7b43c5ee590f4868de373b65/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-18_21:30:29_6b82d936d49956ba7b43c5ee590f4868de373b65/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-18_21:30:29_6b82d936d49956ba7b43c5ee590f4868de373b65/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index fbcd9b87020ab242bfc04a49075cfd2039b49051..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_21:30:29_6b82d936d49956ba7b43c5ee590f4868de373b65/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,468.795392,0.00382,262.0,0.486,206.0 diff --git a/raw_results/2023-08-18_21:30:29_6b82d936d49956ba7b43c5ee590f4868de373b65/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-18_21:30:29_6b82d936d49956ba7b43c5ee590f4868de373b65/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index e95db562e777082fcece9a47f2d613651cdef93a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-18_21:30:29_6b82d936d49956ba7b43c5ee590f4868de373b65/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-18 22:53:16,708][benchmark][INFO] - Configuring inference benchmark -[2023-08-18 22:53:16,709][benchmark][INFO] - + Setting seed(42) -[2023-08-18 22:53:19,083][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-18 22:53:19,083][backend][INFO] - Configuring pytorch backend -[2023-08-18 22:53:19,083][backend][INFO] - + Checking initial device isolation -[2023-08-18 22:53:19,083][backend][INFO] - + Checking contineous device isolation -[2023-08-18 22:53:19,084][pytorch][INFO] - + Disabling gradients -[2023-08-18 22:53:19,084][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-18 22:53:19,796][pytorch][INFO] - + Turning on eval mode -[2023-08-18 22:53:19,796][inference][INFO] - Running inference benchmark -[2023-08-18 22:53:19,989][inference][INFO] - + Tracking forward pass peak memory -[2023-08-18 22:53:20,039][inference][INFO] - + Forward pass peak memory: 468.795392 (MB) -[2023-08-18 22:53:20,040][inference][INFO] - + Warming up the forward pass 
-[2023-08-18 22:53:20,073][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-18 22:53:25,117][inference][INFO] - + Forward pass latency: 3.82e-03 (s) -[2023-08-18 22:53:25,118][inference][INFO] - + Forward pass throughput: 262.00 (samples/s) -[2023-08-18 22:53:25,119][inference][INFO] - + Warming up the generation pass -[2023-08-18 22:53:25,680][inference][INFO] - + Tracking generation latency and throughput -[2023-08-18 22:53:31,026][inference][INFO] - + Generation pass latency: 4.86e-01 (s) -[2023-08-18 22:53:31,028][inference][INFO] - + Generation pass throughput: 206.00 (tokens/s) -[2023-08-18 22:53:31,028][inference][INFO] - Saving inference results -[2023-08-18 22:53:31,041][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-19_09:15:38_1982dd3b15867c46e1c20645901b0de469fd935f/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-19_09:15:38_1982dd3b15867c46e1c20645901b0de469fd935f/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index 8555d6744dd0e5881f14d8db6ba7f34fd0488b3b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-19_09:15:38_1982dd3b15867c46e1c20645901b0de469fd935f/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-19_09:15:38_1982dd3b15867c46e1c20645901b0de469fd935f/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-19_09:15:38_1982dd3b15867c46e1c20645901b0de469fd935f/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 784d0b18cffc668ee84b65f8a8de6108387adf4b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-19_09:15:38_1982dd3b15867c46e1c20645901b0de469fd935f/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
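Across both gpt2 runs above, the generation throughput is consistent with the configured new_tokens: 100 divided by the generation latency (100 / 0.526 s is about 190 tokens/s; 100 / 0.486 s is about 206 tokens/s). A short sketch under that reading, again an inference from the logged numbers:

    NEW_TOKENS = 100  # from the configs above

    for commit, gen_latency_s, reported in [
        ("6c811a3", 0.526, 190.0),
        ("6b82d93", 0.486, 206.0),
    ]:
        print(f"{commit}: {NEW_TOKENS / gen_latency_s:.0f} tokens/s (reported {reported})")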
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-19_09:15:38_1982dd3b15867c46e1c20645901b0de469fd935f/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-19_09:15:38_1982dd3b15867c46e1c20645901b0de469fd935f/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-19_09:15:38_1982dd3b15867c46e1c20645901b0de469fd935f/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-19_09:15:38_1982dd3b15867c46e1c20645901b0de469fd935f/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-19_09:15:38_1982dd3b15867c46e1c20645901b0de469fd935f/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-19_09:15:38_1982dd3b15867c46e1c20645901b0de469fd935f/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 8748d22fe3c2d5758d4d82aa74516bd5a24d74c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-19_09:15:38_1982dd3b15867c46e1c20645901b0de469fd935f/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-19_09:15:38_1982dd3b15867c46e1c20645901b0de469fd935f/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-19_09:15:38_1982dd3b15867c46e1c20645901b0de469fd935f/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index baf26e4d0a1dd8d80bc4f290d42db9035d8a50e5..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-19_09:15:38_1982dd3b15867c46e1c20645901b0de469fd935f/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.911232,0.00314,318.0 diff --git a/raw_results/2023-08-19_09:15:38_1982dd3b15867c46e1c20645901b0de469fd935f/pytorch_bert_inference/0/main.log b/raw_results/2023-08-19_09:15:38_1982dd3b15867c46e1c20645901b0de469fd935f/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
481908326abe90c78ad00f70db35437bafb63f68..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-19_09:15:38_1982dd3b15867c46e1c20645901b0de469fd935f/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-19 10:50:03,904][benchmark][INFO] - Configuring inference benchmark -[2023-08-19 10:50:03,904][benchmark][INFO] - + Setting seed(42) -[2023-08-19 10:50:05,136][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-19 10:50:05,136][backend][INFO] - Configuring pytorch backend -[2023-08-19 10:50:05,136][backend][INFO] - + Checking initial device isolation -[2023-08-19 10:50:05,136][backend][INFO] - + Checking contineous device isolation -[2023-08-19 10:50:05,136][pytorch][INFO] - + Disabling gradients -[2023-08-19 10:50:05,136][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-19 10:50:05,769][pytorch][INFO] - + Turning on eval mode -[2023-08-19 10:50:05,769][inference][INFO] - Running inference benchmark -[2023-08-19 10:50:05,890][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-19 10:50:05,891][inference][INFO] - + Tracking forward pass peak memory -[2023-08-19 10:50:05,952][inference][INFO] - + Forward pass peak memory: 466.911232 (MB) -[2023-08-19 10:50:05,953][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-19 10:50:05,954][inference][INFO] - + Warming up the forward pass -[2023-08-19 10:50:05,991][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-19 10:50:11,042][inference][INFO] - + Forward pass latency: 3.14e-03 (s) -[2023-08-19 10:50:11,043][inference][INFO] - + Forward pass throughput: 318.00 (samples/s) -[2023-08-19 10:50:11,043][inference][INFO] - Saving inference results -[2023-08-19 10:50:11,053][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-19_09:15:38_1982dd3b15867c46e1c20645901b0de469fd935f/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-19_09:15:38_1982dd3b15867c46e1c20645901b0de469fd935f/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index fc02352dc97d220d55fe04bc4edef4200110f22a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-19_09:15:38_1982dd3b15867c46e1c20645901b0de469fd935f/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-19_09:15:38_1982dd3b15867c46e1c20645901b0de469fd935f/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-19_09:15:38_1982dd3b15867c46e1c20645901b0de469fd935f/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index d23b9ac159970ab1f5b32650011a219e6139f1c3..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-19_09:15:38_1982dd3b15867c46e1c20645901b0de469fd935f/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
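[Editor's note: the sweeper block above (benchmark.input_shapes.batch_size: 1,4) is what produces the numbered job directories 0 and 1 in these raw_results paths, each with its own overrides.yaml. A minimal sketch of that expansion follows — this is an illustration of the comma-splitting behavior, not Hydra's actual BasicSweeper implementation:

from itertools import product

def expand_sweep(params: dict[str, str]) -> list[list[str]]:
    """Expand sweep params like {'benchmark.input_shapes.batch_size': '1,4'}
    into one override list per job, in hydra.job.num order."""
    keys = list(params)
    value_lists = [params[k].split(",") for k in keys]
    return [[f"{k}={v}" for k, v in zip(keys, combo)]
            for combo in product(*value_lists)]

# job 0 -> ['benchmark.input_shapes.batch_size=1']
# job 1 -> ['benchmark.input_shapes.batch_size=4']
print(expand_sweep({"benchmark.input_shapes.batch_size": "1,4"}))
]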
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-19_09:15:38_1982dd3b15867c46e1c20645901b0de469fd935f/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-19_09:15:38_1982dd3b15867c46e1c20645901b0de469fd935f/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-19_09:15:38_1982dd3b15867c46e1c20645901b0de469fd935f/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-19_09:15:38_1982dd3b15867c46e1c20645901b0de469fd935f/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-19_09:15:38_1982dd3b15867c46e1c20645901b0de469fd935f/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-19_09:15:38_1982dd3b15867c46e1c20645901b0de469fd935f/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 819b1876e26fdd758df23050ecd02bd8d430c90c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-19_09:15:38_1982dd3b15867c46e1c20645901b0de469fd935f/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-19_09:15:38_1982dd3b15867c46e1c20645901b0de469fd935f/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-19_09:15:38_1982dd3b15867c46e1c20645901b0de469fd935f/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 692dc3c5b0fbc28f3d39671e92654563855474ff..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-19_09:15:38_1982dd3b15867c46e1c20645901b0de469fd935f/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.98848,0.00348,1150.0 diff --git a/raw_results/2023-08-19_09:15:38_1982dd3b15867c46e1c20645901b0de469fd935f/pytorch_bert_inference/1/main.log b/raw_results/2023-08-19_09:15:38_1982dd3b15867c46e1c20645901b0de469fd935f/pytorch_bert_inference/1/main.log deleted file mode 100644 index 14340c64d486b461ee1f6e03fa7bb0aac6309aaa..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-19_09:15:38_1982dd3b15867c46e1c20645901b0de469fd935f/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-19 10:50:11,408][benchmark][INFO] - Configuring inference benchmark -[2023-08-19 10:50:11,409][benchmark][INFO] - + Setting seed(42) -[2023-08-19 10:50:11,842][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-19 10:50:11,843][backend][INFO] - Configuring pytorch backend -[2023-08-19 10:50:11,843][backend][INFO] - + Checking initial device isolation -[2023-08-19 10:50:11,843][backend][INFO] - + Checking contineous device isolation -[2023-08-19 10:50:11,843][pytorch][INFO] - + Disabling gradients -[2023-08-19 10:50:11,843][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-19 10:50:11,957][pytorch][INFO] - + Turning on eval mode -[2023-08-19 10:50:11,957][inference][INFO] - Running inference benchmark -[2023-08-19 10:50:12,076][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-19 10:50:12,077][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-19 10:50:12,119][inference][INFO] - + Forward pass peak memory: 467.98848 (MB) -[2023-08-19 10:50:12,119][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-19 10:50:12,121][inference][INFO] - + Warming up the forward pass -[2023-08-19 10:50:12,157][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-19 10:50:17,202][inference][INFO] - + Forward pass latency: 3.48e-03 (s) -[2023-08-19 10:50:17,203][inference][INFO] - + Forward pass throughput: 1150.00 (samples/s) -[2023-08-19 10:50:17,203][inference][INFO] - Saving inference results -[2023-08-19 10:50:17,212][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-19_09:15:38_1982dd3b15867c46e1c20645901b0de469fd935f/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-19_09:15:38_1982dd3b15867c46e1c20645901b0de469fd935f/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 39258ef78f16622f3cbf970c22d0b92229e7cc16..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-19_09:15:38_1982dd3b15867c46e1c20645901b0de469fd935f/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-19_09:15:38_1982dd3b15867c46e1c20645901b0de469fd935f/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-19_09:15:38_1982dd3b15867c46e1c20645901b0de469fd935f/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index f9f21b9f8581d2893f7b4518360ca28a8946c705..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-19_09:15:38_1982dd3b15867c46e1c20645901b0de469fd935f/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-19_09:15:38_1982dd3b15867c46e1c20645901b0de469fd935f/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-19_09:15:38_1982dd3b15867c46e1c20645901b0de469fd935f/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-19_09:15:38_1982dd3b15867c46e1c20645901b0de469fd935f/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-19_09:15:38_1982dd3b15867c46e1c20645901b0de469fd935f/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-19_09:15:38_1982dd3b15867c46e1c20645901b0de469fd935f/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-19_09:15:38_1982dd3b15867c46e1c20645901b0de469fd935f/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index cb6bbe7e2787ba735853eaeafaa72ec86761d726..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-19_09:15:38_1982dd3b15867c46e1c20645901b0de469fd935f/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-19_09:15:38_1982dd3b15867c46e1c20645901b0de469fd935f/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-19_09:15:38_1982dd3b15867c46e1c20645901b0de469fd935f/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index dd0e269a6b580dc9267a265f5af349023e8ff850..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-19_09:15:38_1982dd3b15867c46e1c20645901b0de469fd935f/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,468.836352,0.00315,317.0,0.481,208.0 diff --git a/raw_results/2023-08-19_09:15:38_1982dd3b15867c46e1c20645901b0de469fd935f/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-19_09:15:38_1982dd3b15867c46e1c20645901b0de469fd935f/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 6bec0c17e15edea1f64ac07ecc895fd5d99c3144..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-19_09:15:38_1982dd3b15867c46e1c20645901b0de469fd935f/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-19 10:50:21,971][benchmark][INFO] - Configuring inference benchmark -[2023-08-19 10:50:21,972][benchmark][INFO] - + Setting seed(42) -[2023-08-19 10:50:23,330][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-19 10:50:23,330][backend][INFO] - Configuring pytorch backend -[2023-08-19 10:50:23,331][backend][INFO] - + Checking initial device isolation -[2023-08-19 10:50:23,331][backend][INFO] - + Checking contineous device isolation -[2023-08-19 10:50:23,331][pytorch][INFO] - + Disabling gradients -[2023-08-19 10:50:23,331][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-19 10:50:24,125][pytorch][INFO] - + Turning on eval mode -[2023-08-19 10:50:24,126][inference][INFO] - Running inference benchmark -[2023-08-19 10:50:24,318][inference][INFO] - + Tracking forward pass peak memory -[2023-08-19 10:50:24,365][inference][INFO] - + Forward pass peak memory: 468.836352 (MB) -[2023-08-19 10:50:24,366][inference][INFO] - + Warming up the forward pass -[2023-08-19 10:50:24,400][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-19 10:50:29,451][inference][INFO] - + Forward pass latency: 3.15e-03 (s) -[2023-08-19 10:50:29,453][inference][INFO] - + Forward pass throughput: 317.00 (samples/s) -[2023-08-19 10:50:29,453][inference][INFO] - + Warming up the generation pass -[2023-08-19 10:50:29,946][inference][INFO] - + Tracking generation latency and throughput -[2023-08-19 10:50:35,241][inference][INFO] - + Generation pass latency: 4.81e-01 (s) -[2023-08-19 10:50:35,242][inference][INFO] - + Generation pass throughput: 208.00 (tokens/s) -[2023-08-19 10:50:35,243][inference][INFO] - Saving inference results -[2023-08-19 10:50:35,254][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-21_07:01:11_f92cc7034a49959b247a46a210b912e56a6f977d/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-21_07:01:11_f92cc7034a49959b247a46a210b912e56a6f977d/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index 8555d6744dd0e5881f14d8db6ba7f34fd0488b3b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_07:01:11_f92cc7034a49959b247a46a210b912e56a6f977d/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_07:01:11_f92cc7034a49959b247a46a210b912e56a6f977d/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-21_07:01:11_f92cc7034a49959b247a46a210b912e56a6f977d/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 39677321f5b9ac4c978e82f3d8093fed289afedd..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_07:01:11_f92cc7034a49959b247a46a210b912e56a6f977d/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-21_07:01:11_f92cc7034a49959b247a46a210b912e56a6f977d/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-21_07:01:11_f92cc7034a49959b247a46a210b912e56a6f977d/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-21_07:01:11_f92cc7034a49959b247a46a210b912e56a6f977d/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_07:01:11_f92cc7034a49959b247a46a210b912e56a6f977d/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-21_07:01:11_f92cc7034a49959b247a46a210b912e56a6f977d/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-21_07:01:11_f92cc7034a49959b247a46a210b912e56a6f977d/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 8748d22fe3c2d5758d4d82aa74516bd5a24d74c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_07:01:11_f92cc7034a49959b247a46a210b912e56a6f977d/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_07:01:11_f92cc7034a49959b247a46a210b912e56a6f977d/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-21_07:01:11_f92cc7034a49959b247a46a210b912e56a6f977d/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index a04ed3b90a3891c01abfe37367a8e0afe5d5d9fc..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_07:01:11_f92cc7034a49959b247a46a210b912e56a6f977d/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.93580799999995,0.00361,277.0 diff --git a/raw_results/2023-08-21_07:01:11_f92cc7034a49959b247a46a210b912e56a6f977d/pytorch_bert_inference/0/main.log b/raw_results/2023-08-21_07:01:11_f92cc7034a49959b247a46a210b912e56a6f977d/pytorch_bert_inference/0/main.log deleted file mode 100644 index 5445fb4e506b7e4d0170b1997b3779ff82a4951f..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_07:01:11_f92cc7034a49959b247a46a210b912e56a6f977d/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-21 08:50:08,394][benchmark][INFO] - Configuring inference benchmark -[2023-08-21 08:50:08,395][benchmark][INFO] - + Setting seed(42) -[2023-08-21 08:50:10,399][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-21 08:50:10,399][backend][INFO] - Configuring pytorch backend -[2023-08-21 08:50:10,400][backend][INFO] - + Checking initial device isolation -[2023-08-21 08:50:10,400][backend][INFO] - + Checking contineous device isolation -[2023-08-21 08:50:10,400][pytorch][INFO] - + Disabling gradients -[2023-08-21 08:50:10,400][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-21 08:50:11,017][pytorch][INFO] - + Turning on eval mode -[2023-08-21 08:50:11,017][inference][INFO] - Running inference benchmark -[2023-08-21 08:50:11,135][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-21 08:50:11,136][inference][INFO] - + Tracking forward 
pass peak memory -[2023-08-21 08:50:11,193][inference][INFO] - + Forward pass peak memory: 466.93580799999995 (MB) -[2023-08-21 08:50:11,194][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-21 08:50:11,196][inference][INFO] - + Warming up the forward pass -[2023-08-21 08:50:11,233][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-21 08:50:16,281][inference][INFO] - + Forward pass latency: 3.61e-03 (s) -[2023-08-21 08:50:16,282][inference][INFO] - + Forward pass throughput: 277.00 (samples/s) -[2023-08-21 08:50:16,282][inference][INFO] - Saving inference results -[2023-08-21 08:50:16,293][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-21_07:01:11_f92cc7034a49959b247a46a210b912e56a6f977d/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-21_07:01:11_f92cc7034a49959b247a46a210b912e56a6f977d/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index fc02352dc97d220d55fe04bc4edef4200110f22a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_07:01:11_f92cc7034a49959b247a46a210b912e56a6f977d/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_07:01:11_f92cc7034a49959b247a46a210b912e56a6f977d/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-21_07:01:11_f92cc7034a49959b247a46a210b912e56a6f977d/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index e1c7ed63bec9803b9b87811bf93a4336b42c8fcb..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_07:01:11_f92cc7034a49959b247a46a210b912e56a6f977d/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} 
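[Editor's note: the throughput columns in these inference_results.csv files are consistent with forward.throughput = batch_size / forward.latency and generate.throughput = new_tokens / generate.latency (new_tokens: 100 in the benchmark config). A quick check against values taken from the CSVs in this diff — the derivation is an assumption inferred from the numbers, not documented here:

rows = [
    # (batch_size, forward.latency(s), reported forward.throughput(samples/s))
    (1, 0.00315, 317.0),   # pytorch_gpt2_inference/0, 2023-08-19 run
    (4, 0.00348, 1150.0),  # pytorch_bert_inference/1 -> 1149, agrees to within rounding
]
for bs, lat, reported in rows:
    print(f"batch={bs}: {bs / lat:.0f} samples/s vs reported {reported}")

# generation pass: 100 new tokens over 0.481 s -> 208 tokens/s as reported
print(f"{100 / 0.481:.0f} tokens/s vs reported 208.0")
]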
- launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-21_07:01:11_f92cc7034a49959b247a46a210b912e56a6f977d/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-21_07:01:11_f92cc7034a49959b247a46a210b912e56a6f977d/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-21_07:01:11_f92cc7034a49959b247a46a210b912e56a6f977d/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_07:01:11_f92cc7034a49959b247a46a210b912e56a6f977d/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-21_07:01:11_f92cc7034a49959b247a46a210b912e56a6f977d/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-21_07:01:11_f92cc7034a49959b247a46a210b912e56a6f977d/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 819b1876e26fdd758df23050ecd02bd8d430c90c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_07:01:11_f92cc7034a49959b247a46a210b912e56a6f977d/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_07:01:11_f92cc7034a49959b247a46a210b912e56a6f977d/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-21_07:01:11_f92cc7034a49959b247a46a210b912e56a6f977d/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index e293b59737b1eaeba2c627f2ee5a18bb5ec822ec..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_07:01:11_f92cc7034a49959b247a46a210b912e56a6f977d/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.97209599999996,0.00348,1150.0 diff --git a/raw_results/2023-08-21_07:01:11_f92cc7034a49959b247a46a210b912e56a6f977d/pytorch_bert_inference/1/main.log b/raw_results/2023-08-21_07:01:11_f92cc7034a49959b247a46a210b912e56a6f977d/pytorch_bert_inference/1/main.log deleted file mode 100644 index 
2924051678d4088c6e790b763c165d23a47b3a81..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_07:01:11_f92cc7034a49959b247a46a210b912e56a6f977d/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-21 08:50:16,687][benchmark][INFO] - Configuring inference benchmark -[2023-08-21 08:50:16,688][benchmark][INFO] - + Setting seed(42) -[2023-08-21 08:50:17,204][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-21 08:50:17,204][backend][INFO] - Configuring pytorch backend -[2023-08-21 08:50:17,204][backend][INFO] - + Checking initial device isolation -[2023-08-21 08:50:17,204][backend][INFO] - + Checking contineous device isolation -[2023-08-21 08:50:17,205][pytorch][INFO] - + Disabling gradients -[2023-08-21 08:50:17,205][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-21 08:50:17,315][pytorch][INFO] - + Turning on eval mode -[2023-08-21 08:50:17,315][inference][INFO] - Running inference benchmark -[2023-08-21 08:50:17,616][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-21 08:50:17,618][inference][INFO] - + Tracking forward pass peak memory -[2023-08-21 08:50:17,656][inference][INFO] - + Forward pass peak memory: 467.97209599999996 (MB) -[2023-08-21 08:50:17,657][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-21 08:50:17,659][inference][INFO] - + Warming up the forward pass -[2023-08-21 08:50:17,693][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-21 08:50:22,741][inference][INFO] - + Forward pass latency: 3.48e-03 (s) -[2023-08-21 08:50:22,743][inference][INFO] - + Forward pass throughput: 1150.00 (samples/s) -[2023-08-21 08:50:22,743][inference][INFO] - Saving inference results -[2023-08-21 08:50:22,751][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-21_07:01:11_f92cc7034a49959b247a46a210b912e56a6f977d/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-21_07:01:11_f92cc7034a49959b247a46a210b912e56a6f977d/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 39258ef78f16622f3cbf970c22d0b92229e7cc16..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_07:01:11_f92cc7034a49959b247a46a210b912e56a6f977d/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_07:01:11_f92cc7034a49959b247a46a210b912e56a6f977d/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-21_07:01:11_f92cc7034a49959b247a46a210b912e56a6f977d/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 7ba6a0baab5e81a20063f008321a66f2c522f79b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_07:01:11_f92cc7034a49959b247a46a210b912e56a6f977d/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
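
Editor's note: the hydra.yaml fragment above assembles each sweep directory name from the COMMIT_DATE_GMT and COMMIT_SHA environment variables (dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}), which is why every raw_results path in this diff begins with a timestamp followed by a 40-character commit hash. A minimal sketch for splitting such a directory name back into its two parts; parse_sweep_dir is a hypothetical helper for illustration, not part of optimum-benchmark:

    from pathlib import Path

    def parse_sweep_dir(path: str) -> tuple[str, str]:
        # The commit SHA is the last underscore-separated field; everything
        # before it is the GMT commit date (which itself contains underscores
        # and colons), so split on the *last* underscore only.
        name = Path(path).name
        date_gmt, _, sha = name.rpartition("_")
        return date_gmt, sha

    date, sha = parse_sweep_dir(
        "raw_results/2023-08-21_07:01:11_f92cc7034a49959b247a46a210b912e56a6f977d"
    )
    assert sha == "f92cc7034a49959b247a46a210b912e56a6f977d"
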
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-21_07:01:11_f92cc7034a49959b247a46a210b912e56a6f977d/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-21_07:01:11_f92cc7034a49959b247a46a210b912e56a6f977d/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-21_07:01:11_f92cc7034a49959b247a46a210b912e56a6f977d/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_07:01:11_f92cc7034a49959b247a46a210b912e56a6f977d/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-21_07:01:11_f92cc7034a49959b247a46a210b912e56a6f977d/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-21_07:01:11_f92cc7034a49959b247a46a210b912e56a6f977d/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index cb6bbe7e2787ba735853eaeafaa72ec86761d726..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_07:01:11_f92cc7034a49959b247a46a210b912e56a6f977d/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_07:01:11_f92cc7034a49959b247a46a210b912e56a6f977d/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-21_07:01:11_f92cc7034a49959b247a46a210b912e56a6f977d/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 8ea26f24a7710e01be2c1ca5692d2b77deb3bd5e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_07:01:11_f92cc7034a49959b247a46a210b912e56a6f977d/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.13536,0.00305,328.0,0.484,207.0 diff --git a/raw_results/2023-08-21_07:01:11_f92cc7034a49959b247a46a210b912e56a6f977d/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-21_07:01:11_f92cc7034a49959b247a46a210b912e56a6f977d/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 90cbeb65fc03ba98ffb046436b0555620d24fe14..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_07:01:11_f92cc7034a49959b247a46a210b912e56a6f977d/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-21 08:50:27,754][benchmark][INFO] - Configuring inference benchmark -[2023-08-21 08:50:27,756][benchmark][INFO] - + Setting seed(42) -[2023-08-21 08:50:29,249][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-21 08:50:29,249][backend][INFO] - Configuring pytorch backend -[2023-08-21 08:50:29,249][backend][INFO] - + Checking initial device isolation -[2023-08-21 08:50:29,249][backend][INFO] - + Checking contineous device isolation -[2023-08-21 08:50:29,250][pytorch][INFO] - + Disabling gradients -[2023-08-21 08:50:29,250][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-21 08:50:29,902][pytorch][INFO] - + Turning on eval mode -[2023-08-21 08:50:29,903][inference][INFO] - Running inference benchmark -[2023-08-21 08:50:30,096][inference][INFO] - + Tracking forward pass peak memory -[2023-08-21 08:50:30,138][inference][INFO] - + Forward pass peak memory: 469.13536 (MB) -[2023-08-21 08:50:30,139][inference][INFO] - + Warming up the forward pass 
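
Editor's note: the CSV row above and the log lines that follow appear mutually consistent: forward throughput is batch_size / forward latency, and generation throughput is new_tokens / generation latency, rounded as logged (the rounding convention is inferred from the values, not documented in this diff). A quick check against this gpt2 run, taking batch_size=1 and new_tokens=100 from the hydra_config.yaml above and the latencies from inference_results.csv:

    batch_size, new_tokens = 1, 100            # from hydra_config.yaml above
    fwd_latency, gen_latency = 0.00305, 0.484  # from inference_results.csv above

    fwd_throughput = batch_size / fwd_latency  # ~327.9, logged as 328.00 samples/s
    gen_throughput = new_tokens / gen_latency  # ~206.6, logged as 207.00 tokens/s
    print(round(fwd_throughput), round(gen_throughput))  # 328 207
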
-[2023-08-21 08:50:30,170][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-21 08:50:35,223][inference][INFO] - + Forward pass latency: 3.05e-03 (s) -[2023-08-21 08:50:35,225][inference][INFO] - + Forward pass throughput: 328.00 (samples/s) -[2023-08-21 08:50:35,226][inference][INFO] - + Warming up the generation pass -[2023-08-21 08:50:35,716][inference][INFO] - + Tracking generation latency and throughput -[2023-08-21 08:50:41,046][inference][INFO] - + Generation pass latency: 4.84e-01 (s) -[2023-08-21 08:50:41,047][inference][INFO] - + Generation pass throughput: 207.00 (tokens/s) -[2023-08-21 08:50:41,047][inference][INFO] - Saving inference results -[2023-08-21 08:50:41,062][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-21_08:04:44_9627c3da4af317a5cf75e6dbdca7e7f94d08e4b0/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-21_08:04:44_9627c3da4af317a5cf75e6dbdca7e7f94d08e4b0/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index 8555d6744dd0e5881f14d8db6ba7f34fd0488b3b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_08:04:44_9627c3da4af317a5cf75e6dbdca7e7f94d08e4b0/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_08:04:44_9627c3da4af317a5cf75e6dbdca7e7f94d08e4b0/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-21_08:04:44_9627c3da4af317a5cf75e6dbdca7e7f94d08e4b0/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 40d0456e255d45c5585fec2e20ede22038ba4b56..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_08:04:44_9627c3da4af317a5cf75e6dbdca7e7f94d08e4b0/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-21_08:04:44_9627c3da4af317a5cf75e6dbdca7e7f94d08e4b0/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-21_08:04:44_9627c3da4af317a5cf75e6dbdca7e7f94d08e4b0/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-21_08:04:44_9627c3da4af317a5cf75e6dbdca7e7f94d08e4b0/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_08:04:44_9627c3da4af317a5cf75e6dbdca7e7f94d08e4b0/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-21_08:04:44_9627c3da4af317a5cf75e6dbdca7e7f94d08e4b0/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-21_08:04:44_9627c3da4af317a5cf75e6dbdca7e7f94d08e4b0/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 8748d22fe3c2d5758d4d82aa74516bd5a24d74c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_08:04:44_9627c3da4af317a5cf75e6dbdca7e7f94d08e4b0/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_08:04:44_9627c3da4af317a5cf75e6dbdca7e7f94d08e4b0/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-21_08:04:44_9627c3da4af317a5cf75e6dbdca7e7f94d08e4b0/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 379014ba6807cb1151ca8a7d73648c0820cee9db..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_08:04:44_9627c3da4af317a5cf75e6dbdca7e7f94d08e4b0/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.21024,0.00333,300.0 diff --git a/raw_results/2023-08-21_08:04:44_9627c3da4af317a5cf75e6dbdca7e7f94d08e4b0/pytorch_bert_inference/0/main.log b/raw_results/2023-08-21_08:04:44_9627c3da4af317a5cf75e6dbdca7e7f94d08e4b0/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
c042f7a2d4bb122ea402b77259733bd0711b1438..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_08:04:44_9627c3da4af317a5cf75e6dbdca7e7f94d08e4b0/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-21 08:51:45,137][benchmark][INFO] - Configuring inference benchmark -[2023-08-21 08:51:45,138][benchmark][INFO] - + Setting seed(42) -[2023-08-21 08:51:46,370][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-21 08:51:46,370][backend][INFO] - Configuring pytorch backend -[2023-08-21 08:51:46,370][backend][INFO] - + Checking initial device isolation -[2023-08-21 08:51:46,370][backend][INFO] - + Checking contineous device isolation -[2023-08-21 08:51:46,371][pytorch][INFO] - + Disabling gradients -[2023-08-21 08:51:46,371][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-21 08:51:46,981][pytorch][INFO] - + Turning on eval mode -[2023-08-21 08:51:46,982][inference][INFO] - Running inference benchmark -[2023-08-21 08:51:47,110][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-21 08:51:47,111][inference][INFO] - + Tracking forward pass peak memory -[2023-08-21 08:51:47,172][inference][INFO] - + Forward pass peak memory: 467.21024 (MB) -[2023-08-21 08:51:47,173][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-21 08:51:47,175][inference][INFO] - + Warming up the forward pass -[2023-08-21 08:51:47,216][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-21 08:51:52,267][inference][INFO] - + Forward pass latency: 3.33e-03 (s) -[2023-08-21 08:51:52,268][inference][INFO] - + Forward pass throughput: 300.00 (samples/s) -[2023-08-21 08:51:52,268][inference][INFO] - Saving inference results -[2023-08-21 08:51:52,279][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-21_08:04:44_9627c3da4af317a5cf75e6dbdca7e7f94d08e4b0/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-21_08:04:44_9627c3da4af317a5cf75e6dbdca7e7f94d08e4b0/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index fc02352dc97d220d55fe04bc4edef4200110f22a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_08:04:44_9627c3da4af317a5cf75e6dbdca7e7f94d08e4b0/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_08:04:44_9627c3da4af317a5cf75e6dbdca7e7f94d08e4b0/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-21_08:04:44_9627c3da4af317a5cf75e6dbdca7e7f94d08e4b0/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index ead5207800c745a0c2f95af29394bd3e012770b5..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_08:04:44_9627c3da4af317a5cf75e6dbdca7e7f94d08e4b0/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
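
Editor's note: the sweeper block above (params: benchmark.input_shapes.batch_size: 1,4) is what produces the numbered job subdirectories 0 and 1 seen throughout these paths: Hydra's BasicSweeper splits each comma-separated value into one override per job, matching the single-line overrides.yaml files in this diff. A rough sketch of that expansion, mimicking the observed behaviour only; this is not Hydra's actual implementation:

    def expand_sweep(params: dict[str, str]) -> list[list[str]]:
        # Cartesian product over comma-separated sweep values: one override
        # list per job, with the job index given by position in the result.
        jobs: list[list[str]] = [[]]
        for key, values in params.items():
            jobs = [job + [f"{key}={v}"] for job in jobs for v in values.split(",")]
        return jobs

    overrides = expand_sweep({"benchmark.input_shapes.batch_size": "1,4"})
    # overrides[0] == ['benchmark.input_shapes.batch_size=1']  -> .../0/.config/overrides.yaml
    # overrides[1] == ['benchmark.input_shapes.batch_size=4']  -> .../1/.config/overrides.yaml
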
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-21_08:04:44_9627c3da4af317a5cf75e6dbdca7e7f94d08e4b0/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-21_08:04:44_9627c3da4af317a5cf75e6dbdca7e7f94d08e4b0/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-21_08:04:44_9627c3da4af317a5cf75e6dbdca7e7f94d08e4b0/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_08:04:44_9627c3da4af317a5cf75e6dbdca7e7f94d08e4b0/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-21_08:04:44_9627c3da4af317a5cf75e6dbdca7e7f94d08e4b0/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-21_08:04:44_9627c3da4af317a5cf75e6dbdca7e7f94d08e4b0/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 819b1876e26fdd758df23050ecd02bd8d430c90c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_08:04:44_9627c3da4af317a5cf75e6dbdca7e7f94d08e4b0/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_08:04:44_9627c3da4af317a5cf75e6dbdca7e7f94d08e4b0/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-21_08:04:44_9627c3da4af317a5cf75e6dbdca7e7f94d08e4b0/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 92152dfec475102e8980c150377821a9a14ef9db..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_08:04:44_9627c3da4af317a5cf75e6dbdca7e7f94d08e4b0/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.279296,0.00352,1140.0 diff --git a/raw_results/2023-08-21_08:04:44_9627c3da4af317a5cf75e6dbdca7e7f94d08e4b0/pytorch_bert_inference/1/main.log b/raw_results/2023-08-21_08:04:44_9627c3da4af317a5cf75e6dbdca7e7f94d08e4b0/pytorch_bert_inference/1/main.log deleted file mode 100644 index eb972984bc0ebe1ba8fc31fd2e9abffa55312211..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_08:04:44_9627c3da4af317a5cf75e6dbdca7e7f94d08e4b0/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-21 08:51:52,653][benchmark][INFO] - Configuring inference benchmark -[2023-08-21 08:51:52,654][benchmark][INFO] - + Setting seed(42) -[2023-08-21 08:51:53,091][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-21 08:51:53,091][backend][INFO] - Configuring pytorch backend -[2023-08-21 08:51:53,091][backend][INFO] - + Checking initial device isolation -[2023-08-21 08:51:53,091][backend][INFO] - + Checking contineous device isolation -[2023-08-21 08:51:53,092][pytorch][INFO] - + Disabling gradients -[2023-08-21 08:51:53,092][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-21 08:51:53,211][pytorch][INFO] - + Turning on eval mode -[2023-08-21 08:51:53,212][inference][INFO] - Running inference benchmark -[2023-08-21 08:51:53,341][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-21 08:51:53,342][inference][INFO] - + Tracking forward pass 
peak memory -[2023-08-21 08:51:53,390][inference][INFO] - + Forward pass peak memory: 468.279296 (MB) -[2023-08-21 08:51:53,392][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-21 08:51:53,393][inference][INFO] - + Warming up the forward pass -[2023-08-21 08:51:53,444][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-21 08:51:58,491][inference][INFO] - + Forward pass latency: 3.52e-03 (s) -[2023-08-21 08:51:58,492][inference][INFO] - + Forward pass throughput: 1140.00 (samples/s) -[2023-08-21 08:51:58,493][inference][INFO] - Saving inference results -[2023-08-21 08:51:58,501][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-21_08:04:44_9627c3da4af317a5cf75e6dbdca7e7f94d08e4b0/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-21_08:04:44_9627c3da4af317a5cf75e6dbdca7e7f94d08e4b0/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 39258ef78f16622f3cbf970c22d0b92229e7cc16..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_08:04:44_9627c3da4af317a5cf75e6dbdca7e7f94d08e4b0/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_08:04:44_9627c3da4af317a5cf75e6dbdca7e7f94d08e4b0/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-21_08:04:44_9627c3da4af317a5cf75e6dbdca7e7f94d08e4b0/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index e2678ce5a7692e82419324d1205bc339741abc5f..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_08:04:44_9627c3da4af317a5cf75e6dbdca7e7f94d08e4b0/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-21_08:04:44_9627c3da4af317a5cf75e6dbdca7e7f94d08e4b0/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-21_08:04:44_9627c3da4af317a5cf75e6dbdca7e7f94d08e4b0/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-21_08:04:44_9627c3da4af317a5cf75e6dbdca7e7f94d08e4b0/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_08:04:44_9627c3da4af317a5cf75e6dbdca7e7f94d08e4b0/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-21_08:04:44_9627c3da4af317a5cf75e6dbdca7e7f94d08e4b0/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-21_08:04:44_9627c3da4af317a5cf75e6dbdca7e7f94d08e4b0/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index cb6bbe7e2787ba735853eaeafaa72ec86761d726..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_08:04:44_9627c3da4af317a5cf75e6dbdca7e7f94d08e4b0/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_08:04:44_9627c3da4af317a5cf75e6dbdca7e7f94d08e4b0/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-21_08:04:44_9627c3da4af317a5cf75e6dbdca7e7f94d08e4b0/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 2b4a75be69db26de321fdfb45e2925a48ca09ea7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_08:04:44_9627c3da4af317a5cf75e6dbdca7e7f94d08e4b0/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.25824,0.00312,321.0,0.481,208.0 diff --git a/raw_results/2023-08-21_08:04:44_9627c3da4af317a5cf75e6dbdca7e7f94d08e4b0/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-21_08:04:44_9627c3da4af317a5cf75e6dbdca7e7f94d08e4b0/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 2d8ae8b64249c872bb72d13985bc90e7fb78b3e9..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-21_08:04:44_9627c3da4af317a5cf75e6dbdca7e7f94d08e4b0/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-21 08:52:03,246][benchmark][INFO] - Configuring inference benchmark -[2023-08-21 08:52:03,247][benchmark][INFO] - + Setting seed(42) -[2023-08-21 08:52:04,794][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-21 08:52:04,794][backend][INFO] - Configuring pytorch backend -[2023-08-21 08:52:04,794][backend][INFO] - + Checking initial device isolation -[2023-08-21 08:52:04,794][backend][INFO] - + Checking contineous device isolation -[2023-08-21 08:52:04,794][pytorch][INFO] - + Disabling gradients -[2023-08-21 08:52:04,794][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-21 08:52:05,426][pytorch][INFO] - + Turning on eval mode -[2023-08-21 08:52:05,427][inference][INFO] - Running inference benchmark -[2023-08-21 08:52:05,637][inference][INFO] - + Tracking forward pass peak memory -[2023-08-21 08:52:05,689][inference][INFO] - + Forward pass peak memory: 469.25824 (MB) -[2023-08-21 08:52:05,690][inference][INFO] - + Warming up the forward pass -[2023-08-21 08:52:05,723][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-21 08:52:10,773][inference][INFO] - + Forward pass latency: 3.12e-03 (s) -[2023-08-21 08:52:10,775][inference][INFO] - + Forward pass throughput: 321.00 (samples/s) -[2023-08-21 08:52:10,776][inference][INFO] - + Warming up the generation pass -[2023-08-21 08:52:11,270][inference][INFO] - + Tracking generation latency and throughput -[2023-08-21 08:52:16,562][inference][INFO] - + Generation pass latency: 4.81e-01 (s) -[2023-08-21 08:52:16,562][inference][INFO] - + Generation pass throughput: 208.00 (tokens/s) -[2023-08-21 08:52:16,563][inference][INFO] - Saving inference results -[2023-08-21 08:52:16,576][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-21_09:08:38_f09db47a71ddef60ccc120b953ee32326c9253a3/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-21_09:08:38_f09db47a71ddef60ccc120b953ee32326c9253a3/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index 8555d6744dd0e5881f14d8db6ba7f34fd0488b3b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_09:08:38_f09db47a71ddef60ccc120b953ee32326c9253a3/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 
16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_09:08:38_f09db47a71ddef60ccc120b953ee32326c9253a3/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-21_09:08:38_f09db47a71ddef60ccc120b953ee32326c9253a3/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index d0e4977a43c5fa78d385428f3d3fa3f3a1209b1a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_09:08:38_f09db47a71ddef60ccc120b953ee32326c9253a3/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
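
Editor's note: every job directory in this diff carries a one-row inference_results.csv (headers: forward.peak_memory(MB), forward.latency(s), forward.throughput(samples/s), plus generate.* columns for text-generation runs). A minimal sketch, assuming pandas is installed and the raw_results/<sweep>/<experiment>/<job>/ layout shown here, for gathering those rows into one table; collect_results is a hypothetical helper, not part of the benchmark tooling:

    from pathlib import Path
    import pandas as pd

    def collect_results(root: str = "raw_results") -> pd.DataFrame:
        # Layout: raw_results/<sweep>/<experiment>/<job>/inference_results.csv
        rows = []
        for csv in Path(root).glob("*/*/*/inference_results.csv"):
            df = pd.read_csv(csv, index_col=0)  # first (unnamed) column is the row index
            df["sweep"], df["experiment"], df["job"] = csv.parts[-4:-1]
            rows.append(df)
        # bert runs lack the generate.* columns, so concat fills them with NaN
        return pd.concat(rows, ignore_index=True)

    # e.g. collect_results().groupby("experiment")["forward.latency(s)"].mean()
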
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-21_09:08:38_f09db47a71ddef60ccc120b953ee32326c9253a3/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-21_09:08:38_f09db47a71ddef60ccc120b953ee32326c9253a3/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-21_09:08:38_f09db47a71ddef60ccc120b953ee32326c9253a3/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_09:08:38_f09db47a71ddef60ccc120b953ee32326c9253a3/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-21_09:08:38_f09db47a71ddef60ccc120b953ee32326c9253a3/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-21_09:08:38_f09db47a71ddef60ccc120b953ee32326c9253a3/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 8748d22fe3c2d5758d4d82aa74516bd5a24d74c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_09:08:38_f09db47a71ddef60ccc120b953ee32326c9253a3/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_09:08:38_f09db47a71ddef60ccc120b953ee32326c9253a3/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-21_09:08:38_f09db47a71ddef60ccc120b953ee32326c9253a3/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 13ed7eb83c45e700f46a12079e73a31af39cc912..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_09:08:38_f09db47a71ddef60ccc120b953ee32326c9253a3/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.410944,0.00334,299.0 diff --git a/raw_results/2023-08-21_09:08:38_f09db47a71ddef60ccc120b953ee32326c9253a3/pytorch_bert_inference/0/main.log b/raw_results/2023-08-21_09:08:38_f09db47a71ddef60ccc120b953ee32326c9253a3/pytorch_bert_inference/0/main.log deleted file mode 100644 index f5923e46c19c9ffca0ffc9edcb48013ac97d6fe7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_09:08:38_f09db47a71ddef60ccc120b953ee32326c9253a3/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-21 10:49:38,831][benchmark][INFO] - Configuring inference benchmark -[2023-08-21 10:49:38,832][benchmark][INFO] - + Setting seed(42) -[2023-08-21 10:49:40,032][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-21 10:49:40,032][backend][INFO] - Configuring pytorch backend -[2023-08-21 10:49:40,032][backend][INFO] - + Checking initial device isolation -[2023-08-21 10:49:40,033][backend][INFO] - + Checking contineous device isolation -[2023-08-21 10:49:40,033][pytorch][INFO] - + Disabling gradients -[2023-08-21 10:49:40,033][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-21 10:49:40,645][pytorch][INFO] - + Turning on eval mode -[2023-08-21 10:49:40,645][inference][INFO] - Running inference benchmark -[2023-08-21 10:49:40,851][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-21 10:49:40,853][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-21 10:49:40,916][inference][INFO] - + Forward pass peak memory: 467.410944 (MB) -[2023-08-21 10:49:40,917][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-21 10:49:40,919][inference][INFO] - + Warming up the forward pass -[2023-08-21 10:49:40,953][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-21 10:49:46,002][inference][INFO] - + Forward pass latency: 3.34e-03 (s) -[2023-08-21 10:49:46,003][inference][INFO] - + Forward pass throughput: 299.00 (samples/s) -[2023-08-21 10:49:46,004][inference][INFO] - Saving inference results -[2023-08-21 10:49:46,014][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-21_09:08:38_f09db47a71ddef60ccc120b953ee32326c9253a3/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-21_09:08:38_f09db47a71ddef60ccc120b953ee32326c9253a3/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index fc02352dc97d220d55fe04bc4edef4200110f22a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_09:08:38_f09db47a71ddef60ccc120b953ee32326c9253a3/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_09:08:38_f09db47a71ddef60ccc120b953ee32326c9253a3/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-21_09:08:38_f09db47a71ddef60ccc120b953ee32326c9253a3/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 22713bfb4bb3a12007e7b03f88a0074acf14d438..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_09:08:38_f09db47a71ddef60ccc120b953ee32326c9253a3/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-21_09:08:38_f09db47a71ddef60ccc120b953ee32326c9253a3/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-21_09:08:38_f09db47a71ddef60ccc120b953ee32326c9253a3/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-08-21_09:08:38_f09db47a71ddef60ccc120b953ee32326c9253a3/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_09:08:38_f09db47a71ddef60ccc120b953ee32326c9253a3/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-21_09:08:38_f09db47a71ddef60ccc120b953ee32326c9253a3/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-21_09:08:38_f09db47a71ddef60ccc120b953ee32326c9253a3/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 819b1876e26fdd758df23050ecd02bd8d430c90c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_09:08:38_f09db47a71ddef60ccc120b953ee32326c9253a3/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_09:08:38_f09db47a71ddef60ccc120b953ee32326c9253a3/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-21_09:08:38_f09db47a71ddef60ccc120b953ee32326c9253a3/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index a1ccc27a7433757192e9e3f693cbd56e649d463a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_09:08:38_f09db47a71ddef60ccc120b953ee32326c9253a3/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.508672,0.00435,920.0 diff --git a/raw_results/2023-08-21_09:08:38_f09db47a71ddef60ccc120b953ee32326c9253a3/pytorch_bert_inference/1/main.log b/raw_results/2023-08-21_09:08:38_f09db47a71ddef60ccc120b953ee32326c9253a3/pytorch_bert_inference/1/main.log deleted file mode 100644 index d1e4cd0839becc93118dd4155453ec474ec02c0b..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-21_09:08:38_f09db47a71ddef60ccc120b953ee32326c9253a3/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-21 10:49:46,384][benchmark][INFO] - Configuring inference benchmark -[2023-08-21 10:49:46,386][benchmark][INFO] - + Setting seed(42) -[2023-08-21 10:49:46,820][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-21 10:49:46,820][backend][INFO] - Configuring pytorch backend -[2023-08-21 10:49:46,820][backend][INFO] - + Checking initial device isolation -[2023-08-21 10:49:46,820][backend][INFO] - + Checking contineous device isolation -[2023-08-21 10:49:46,821][pytorch][INFO] - + Disabling gradients -[2023-08-21 10:49:46,821][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-21 10:49:46,935][pytorch][INFO] - + Turning on eval mode -[2023-08-21 10:49:46,935][inference][INFO] - Running inference benchmark -[2023-08-21 10:49:47,056][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-21 10:49:47,058][inference][INFO] - + Tracking forward pass peak memory -[2023-08-21 10:49:47,102][inference][INFO] - + Forward pass peak memory: 468.508672 (MB) -[2023-08-21 10:49:47,103][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-21 10:49:47,106][inference][INFO] - + Warming up the forward pass -[2023-08-21 10:49:47,151][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-21 10:49:52,191][inference][INFO] - + Forward pass latency: 4.35e-03 (s) -[2023-08-21 10:49:52,192][inference][INFO] - + Forward pass throughput: 920.00 (samples/s) -[2023-08-21 10:49:52,192][inference][INFO] - Saving inference results -[2023-08-21 10:49:52,200][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-21_09:08:38_f09db47a71ddef60ccc120b953ee32326c9253a3/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-21_09:08:38_f09db47a71ddef60ccc120b953ee32326c9253a3/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 39258ef78f16622f3cbf970c22d0b92229e7cc16..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_09:08:38_f09db47a71ddef60ccc120b953ee32326c9253a3/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: 
hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_09:08:38_f09db47a71ddef60ccc120b953ee32326c9253a3/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-21_09:08:38_f09db47a71ddef60ccc120b953ee32326c9253a3/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index f09972dd9a423e0cb61a15be258e140d1f69fdf0..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_09:08:38_f09db47a71ddef60ccc120b953ee32326c9253a3/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-21_09:08:38_f09db47a71ddef60ccc120b953ee32326c9253a3/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-21_09:08:38_f09db47a71ddef60ccc120b953ee32326c9253a3/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-21_09:08:38_f09db47a71ddef60ccc120b953ee32326c9253a3/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_09:08:38_f09db47a71ddef60ccc120b953ee32326c9253a3/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-21_09:08:38_f09db47a71ddef60ccc120b953ee32326c9253a3/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-21_09:08:38_f09db47a71ddef60ccc120b953ee32326c9253a3/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index cb6bbe7e2787ba735853eaeafaa72ec86761d726..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_09:08:38_f09db47a71ddef60ccc120b953ee32326c9253a3/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_09:08:38_f09db47a71ddef60ccc120b953ee32326c9253a3/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-21_09:08:38_f09db47a71ddef60ccc120b953ee32326c9253a3/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index c3b9d7cd2330c4c3817343b4c239ef08dcfe66a5..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_09:08:38_f09db47a71ddef60ccc120b953ee32326c9253a3/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.250048,0.00336,298.0,0.48,208.0 diff --git a/raw_results/2023-08-21_09:08:38_f09db47a71ddef60ccc120b953ee32326c9253a3/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-21_09:08:38_f09db47a71ddef60ccc120b953ee32326c9253a3/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index b2e0e9c9cfe44bdc497661c35c5824843b3c997b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_09:08:38_f09db47a71ddef60ccc120b953ee32326c9253a3/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-21 10:49:56,956][benchmark][INFO] - Configuring inference benchmark -[2023-08-21 10:49:56,957][benchmark][INFO] - + Setting seed(42) -[2023-08-21 10:49:58,346][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-21 10:49:58,347][backend][INFO] - Configuring pytorch backend -[2023-08-21 10:49:58,347][backend][INFO] - + Checking initial device isolation -[2023-08-21 10:49:58,347][backend][INFO] - + Checking contineous device isolation -[2023-08-21 10:49:58,347][pytorch][INFO] - + Disabling gradients -[2023-08-21 10:49:58,348][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-21 10:49:58,998][pytorch][INFO] - + Turning on eval mode -[2023-08-21 10:49:58,999][inference][INFO] - Running inference benchmark -[2023-08-21 10:49:59,196][inference][INFO] - + Tracking forward pass peak memory -[2023-08-21 10:49:59,243][inference][INFO] - + Forward pass peak memory: 469.250048 (MB) -[2023-08-21 10:49:59,244][inference][INFO] - + Warming up the forward pass 
-[2023-08-21 10:49:59,275][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-21 10:50:04,323][inference][INFO] - + Forward pass latency: 3.36e-03 (s) -[2023-08-21 10:50:04,325][inference][INFO] - + Forward pass throughput: 298.00 (samples/s) -[2023-08-21 10:50:04,325][inference][INFO] - + Warming up the generation pass -[2023-08-21 10:50:04,814][inference][INFO] - + Tracking generation latency and throughput -[2023-08-21 10:50:10,093][inference][INFO] - + Generation pass latency: 4.80e-01 (s) -[2023-08-21 10:50:10,094][inference][INFO] - + Generation pass throughput: 208.00 (tokens/s) -[2023-08-21 10:50:10,094][inference][INFO] - Saving inference results -[2023-08-21 10:50:10,106][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-21_09:11:21_2f8acfea1ca11fe3479fb379ccbded516d0cff57/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-21_09:11:21_2f8acfea1ca11fe3479fb379ccbded516d0cff57/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index 8555d6744dd0e5881f14d8db6ba7f34fd0488b3b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_09:11:21_2f8acfea1ca11fe3479fb379ccbded516d0cff57/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_09:11:21_2f8acfea1ca11fe3479fb379ccbded516d0cff57/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-21_09:11:21_2f8acfea1ca11fe3479fb379ccbded516d0cff57/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 1ffbad91aa771e0b4d67d7e4b4bafcffc8c11c8c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_09:11:21_2f8acfea1ca11fe3479fb379ccbded516d0cff57/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-21_09:11:21_2f8acfea1ca11fe3479fb379ccbded516d0cff57/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-21_09:11:21_2f8acfea1ca11fe3479fb379ccbded516d0cff57/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-21_09:11:21_2f8acfea1ca11fe3479fb379ccbded516d0cff57/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_09:11:21_2f8acfea1ca11fe3479fb379ccbded516d0cff57/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-21_09:11:21_2f8acfea1ca11fe3479fb379ccbded516d0cff57/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-21_09:11:21_2f8acfea1ca11fe3479fb379ccbded516d0cff57/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 8748d22fe3c2d5758d4d82aa74516bd5a24d74c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_09:11:21_2f8acfea1ca11fe3479fb379ccbded516d0cff57/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_09:11:21_2f8acfea1ca11fe3479fb379ccbded516d0cff57/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-21_09:11:21_2f8acfea1ca11fe3479fb379ccbded516d0cff57/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 2ae404f545bc2e48121313a971c7ea005f6e2000..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_09:11:21_2f8acfea1ca11fe3479fb379ccbded516d0cff57/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.369984,0.00373,268.0 diff --git a/raw_results/2023-08-21_09:11:21_2f8acfea1ca11fe3479fb379ccbded516d0cff57/pytorch_bert_inference/0/main.log b/raw_results/2023-08-21_09:11:21_2f8acfea1ca11fe3479fb379ccbded516d0cff57/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
529a6f29048728dcac9ca0c86719100e7c3738d3..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_09:11:21_2f8acfea1ca11fe3479fb379ccbded516d0cff57/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-21 10:51:13,755][benchmark][INFO] - Configuring inference benchmark -[2023-08-21 10:51:13,757][benchmark][INFO] - + Setting seed(42) -[2023-08-21 10:51:15,336][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-21 10:51:15,336][backend][INFO] - Configuring pytorch backend -[2023-08-21 10:51:15,336][backend][INFO] - + Checking initial device isolation -[2023-08-21 10:51:15,337][backend][INFO] - + Checking contineous device isolation -[2023-08-21 10:51:15,337][pytorch][INFO] - + Disabling gradients -[2023-08-21 10:51:15,337][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-21 10:51:15,969][pytorch][INFO] - + Turning on eval mode -[2023-08-21 10:51:15,969][inference][INFO] - Running inference benchmark -[2023-08-21 10:51:16,091][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-21 10:51:16,093][inference][INFO] - + Tracking forward pass peak memory -[2023-08-21 10:51:16,150][inference][INFO] - + Forward pass peak memory: 467.369984 (MB) -[2023-08-21 10:51:16,151][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-21 10:51:16,153][inference][INFO] - + Warming up the forward pass -[2023-08-21 10:51:16,186][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-21 10:51:21,233][inference][INFO] - + Forward pass latency: 3.73e-03 (s) -[2023-08-21 10:51:21,235][inference][INFO] - + Forward pass throughput: 268.00 (samples/s) -[2023-08-21 10:51:21,235][inference][INFO] - Saving inference results -[2023-08-21 10:51:21,246][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-21_09:11:21_2f8acfea1ca11fe3479fb379ccbded516d0cff57/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-21_09:11:21_2f8acfea1ca11fe3479fb379ccbded516d0cff57/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index fc02352dc97d220d55fe04bc4edef4200110f22a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_09:11:21_2f8acfea1ca11fe3479fb379ccbded516d0cff57/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_09:11:21_2f8acfea1ca11fe3479fb379ccbded516d0cff57/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-21_09:11:21_2f8acfea1ca11fe3479fb379ccbded516d0cff57/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index b3095742b66c54409be81c86100c405af51e7721..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_09:11:21_2f8acfea1ca11fe3479fb379ccbded516d0cff57/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-21_09:11:21_2f8acfea1ca11fe3479fb379ccbded516d0cff57/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-21_09:11:21_2f8acfea1ca11fe3479fb379ccbded516d0cff57/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-21_09:11:21_2f8acfea1ca11fe3479fb379ccbded516d0cff57/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_09:11:21_2f8acfea1ca11fe3479fb379ccbded516d0cff57/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-21_09:11:21_2f8acfea1ca11fe3479fb379ccbded516d0cff57/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-21_09:11:21_2f8acfea1ca11fe3479fb379ccbded516d0cff57/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 819b1876e26fdd758df23050ecd02bd8d430c90c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_09:11:21_2f8acfea1ca11fe3479fb379ccbded516d0cff57/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_09:11:21_2f8acfea1ca11fe3479fb379ccbded516d0cff57/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-21_09:11:21_2f8acfea1ca11fe3479fb379ccbded516d0cff57/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index adfaebc2359b53e4c6ce53fd8f61f6ce5b393349..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_09:11:21_2f8acfea1ca11fe3479fb379ccbded516d0cff57/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.393984,0.0043,930.0 diff --git a/raw_results/2023-08-21_09:11:21_2f8acfea1ca11fe3479fb379ccbded516d0cff57/pytorch_bert_inference/1/main.log b/raw_results/2023-08-21_09:11:21_2f8acfea1ca11fe3479fb379ccbded516d0cff57/pytorch_bert_inference/1/main.log deleted file mode 100644 index 87f86fd82c6908fbaed27d82149a756d8d1a6aa0..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_09:11:21_2f8acfea1ca11fe3479fb379ccbded516d0cff57/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-21 10:51:21,620][benchmark][INFO] - Configuring inference benchmark -[2023-08-21 10:51:21,622][benchmark][INFO] - + Setting seed(42) -[2023-08-21 10:51:22,065][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-21 10:51:22,066][backend][INFO] - Configuring pytorch backend -[2023-08-21 10:51:22,066][backend][INFO] - + Checking initial device isolation -[2023-08-21 10:51:22,066][backend][INFO] - + Checking contineous device isolation -[2023-08-21 10:51:22,067][pytorch][INFO] - + Disabling gradients -[2023-08-21 10:51:22,067][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-21 10:51:22,190][pytorch][INFO] - + Turning on eval mode -[2023-08-21 10:51:22,191][inference][INFO] - Running inference benchmark -[2023-08-21 10:51:22,313][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-21 10:51:22,314][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-21 10:51:22,357][inference][INFO] - + Forward pass peak memory: 468.393984 (MB) -[2023-08-21 10:51:22,358][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-21 10:51:22,360][inference][INFO] - + Warming up the forward pass -[2023-08-21 10:51:22,403][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-21 10:51:27,446][inference][INFO] - + Forward pass latency: 4.30e-03 (s) -[2023-08-21 10:51:27,447][inference][INFO] - + Forward pass throughput: 930.00 (samples/s) -[2023-08-21 10:51:27,447][inference][INFO] - Saving inference results -[2023-08-21 10:51:27,455][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-21_09:11:21_2f8acfea1ca11fe3479fb379ccbded516d0cff57/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-21_09:11:21_2f8acfea1ca11fe3479fb379ccbded516d0cff57/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 39258ef78f16622f3cbf970c22d0b92229e7cc16..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_09:11:21_2f8acfea1ca11fe3479fb379ccbded516d0cff57/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_09:11:21_2f8acfea1ca11fe3479fb379ccbded516d0cff57/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-21_09:11:21_2f8acfea1ca11fe3479fb379ccbded516d0cff57/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 501922e845109d1e88c809814bbbeb527b8461d9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_09:11:21_2f8acfea1ca11fe3479fb379ccbded516d0cff57/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-21_09:11:21_2f8acfea1ca11fe3479fb379ccbded516d0cff57/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-21_09:11:21_2f8acfea1ca11fe3479fb379ccbded516d0cff57/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-21_09:11:21_2f8acfea1ca11fe3479fb379ccbded516d0cff57/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_09:11:21_2f8acfea1ca11fe3479fb379ccbded516d0cff57/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-21_09:11:21_2f8acfea1ca11fe3479fb379ccbded516d0cff57/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-21_09:11:21_2f8acfea1ca11fe3479fb379ccbded516d0cff57/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index cb6bbe7e2787ba735853eaeafaa72ec86761d726..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_09:11:21_2f8acfea1ca11fe3479fb379ccbded516d0cff57/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_09:11:21_2f8acfea1ca11fe3479fb379ccbded516d0cff57/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-21_09:11:21_2f8acfea1ca11fe3479fb379ccbded516d0cff57/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index f114ce5ddd4166640c47e5daec5c2785b59c80f6..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_09:11:21_2f8acfea1ca11fe3479fb379ccbded516d0cff57/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,468.98790399999996,0.00308,325.0,0.479,209.0 diff --git a/raw_results/2023-08-21_09:11:21_2f8acfea1ca11fe3479fb379ccbded516d0cff57/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-21_09:11:21_2f8acfea1ca11fe3479fb379ccbded516d0cff57/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index a09eacb32da44bde61a4e660bbb3f4cb08e4335b..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-21_09:11:21_2f8acfea1ca11fe3479fb379ccbded516d0cff57/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-21 10:51:32,176][benchmark][INFO] - Configuring inference benchmark -[2023-08-21 10:51:32,177][benchmark][INFO] - + Setting seed(42) -[2023-08-21 10:51:33,788][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-21 10:51:33,788][backend][INFO] - Configuring pytorch backend -[2023-08-21 10:51:33,789][backend][INFO] - + Checking initial device isolation -[2023-08-21 10:51:33,789][backend][INFO] - + Checking contineous device isolation -[2023-08-21 10:51:33,789][pytorch][INFO] - + Disabling gradients -[2023-08-21 10:51:33,789][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-21 10:51:34,413][pytorch][INFO] - + Turning on eval mode -[2023-08-21 10:51:34,413][inference][INFO] - Running inference benchmark -[2023-08-21 10:51:34,597][inference][INFO] - + Tracking forward pass peak memory -[2023-08-21 10:51:34,647][inference][INFO] - + Forward pass peak memory: 468.98790399999996 (MB) -[2023-08-21 10:51:34,649][inference][INFO] - + Warming up the forward pass -[2023-08-21 10:51:34,682][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-21 10:51:39,734][inference][INFO] - + Forward pass latency: 3.08e-03 (s) -[2023-08-21 10:51:39,736][inference][INFO] - + Forward pass throughput: 325.00 (samples/s) -[2023-08-21 10:51:39,737][inference][INFO] - + Warming up the generation pass -[2023-08-21 10:51:40,225][inference][INFO] - + Tracking generation latency and throughput -[2023-08-21 10:51:45,498][inference][INFO] - + Generation pass latency: 4.79e-01 (s) -[2023-08-21 10:51:45,499][inference][INFO] - + Generation pass throughput: 209.00 (tokens/s) -[2023-08-21 10:51:45,499][inference][INFO] - Saving inference results -[2023-08-21 10:51:45,511][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-21_11:07:04_5c67682b169576c4859700d551090ff79d450a9a/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-21_11:07:04_5c67682b169576c4859700d551090ff79d450a9a/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_11:07:04_5c67682b169576c4859700d551090ff79d450a9a/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_11:07:04_5c67682b169576c4859700d551090ff79d450a9a/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-21_11:07:04_5c67682b169576c4859700d551090ff79d450a9a/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 71bd64aa79da02cb4d776b28f03b9b8470c410c9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_11:07:04_5c67682b169576c4859700d551090ff79d450a9a/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-21_11:07:04_5c67682b169576c4859700d551090ff79d450a9a/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-21_11:07:04_5c67682b169576c4859700d551090ff79d450a9a/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-21_11:07:04_5c67682b169576c4859700d551090ff79d450a9a/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_11:07:04_5c67682b169576c4859700d551090ff79d450a9a/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-21_11:07:04_5c67682b169576c4859700d551090ff79d450a9a/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-21_11:07:04_5c67682b169576c4859700d551090ff79d450a9a/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_11:07:04_5c67682b169576c4859700d551090ff79d450a9a/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_11:07:04_5c67682b169576c4859700d551090ff79d450a9a/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-21_11:07:04_5c67682b169576c4859700d551090ff79d450a9a/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 756b7fb63528284dc78f1a71aed018d0dc3682d6..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_11:07:04_5c67682b169576c4859700d551090ff79d450a9a/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.94809599999996,0.00338,296.0 diff --git a/raw_results/2023-08-21_11:07:04_5c67682b169576c4859700d551090ff79d450a9a/pytorch_bert_inference/0/main.log b/raw_results/2023-08-21_11:07:04_5c67682b169576c4859700d551090ff79d450a9a/pytorch_bert_inference/0/main.log deleted file mode 100644 index 82a0ca38698fd21e99d2b26f69cc7a3987bd4972..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_11:07:04_5c67682b169576c4859700d551090ff79d450a9a/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-21 12:58:23,939][benchmark][INFO] - Configuring inference benchmark -[2023-08-21 12:58:23,940][benchmark][INFO] - + Setting seed(42) -[2023-08-21 12:58:25,221][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-21 12:58:25,221][backend][INFO] - Configuring pytorch backend -[2023-08-21 12:58:25,222][backend][INFO] - + Checking initial device isolation -[2023-08-21 12:58:25,222][backend][INFO] - + Checking contineous device isolation -[2023-08-21 12:58:25,222][pytorch][INFO] - + Disabling gradients -[2023-08-21 12:58:25,222][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-21 12:58:26,057][pytorch][INFO] - + Turning on eval mode -[2023-08-21 12:58:26,057][inference][INFO] - Running inference benchmark -[2023-08-21 12:58:26,176][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-21 12:58:26,178][inference][INFO] - + Tracking forward 
pass peak memory -[2023-08-21 12:58:26,235][inference][INFO] - + Forward pass peak memory: 466.94809599999996 (MB) -[2023-08-21 12:58:26,237][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-21 12:58:26,238][inference][INFO] - + Warming up the forward pass -[2023-08-21 12:58:26,270][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-21 12:58:31,317][inference][INFO] - + Forward pass latency: 3.38e-03 (s) -[2023-08-21 12:58:31,319][inference][INFO] - + Forward pass throughput: 296.00 (samples/s) -[2023-08-21 12:58:31,319][inference][INFO] - Saving inference results -[2023-08-21 12:58:31,337][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-21_11:07:04_5c67682b169576c4859700d551090ff79d450a9a/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-21_11:07:04_5c67682b169576c4859700d551090ff79d450a9a/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_11:07:04_5c67682b169576c4859700d551090ff79d450a9a/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_11:07:04_5c67682b169576c4859700d551090ff79d450a9a/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-21_11:07:04_5c67682b169576c4859700d551090ff79d450a9a/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 10aab9e24b39824ed4d63328fd6e5d09b8ed9691..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_11:07:04_5c67682b169576c4859700d551090ff79d450a9a/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} 
- launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-21_11:07:04_5c67682b169576c4859700d551090ff79d450a9a/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-21_11:07:04_5c67682b169576c4859700d551090ff79d450a9a/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-21_11:07:04_5c67682b169576c4859700d551090ff79d450a9a/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_11:07:04_5c67682b169576c4859700d551090ff79d450a9a/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-21_11:07:04_5c67682b169576c4859700d551090ff79d450a9a/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-21_11:07:04_5c67682b169576c4859700d551090ff79d450a9a/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_11:07:04_5c67682b169576c4859700d551090ff79d450a9a/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_11:07:04_5c67682b169576c4859700d551090ff79d450a9a/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-21_11:07:04_5c67682b169576c4859700d551090ff79d450a9a/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 1de0791c87d011b903cc5178b0d6f0f40e32e9de..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_11:07:04_5c67682b169576c4859700d551090ff79d450a9a/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.013056,0.00423,946.0 diff --git a/raw_results/2023-08-21_11:07:04_5c67682b169576c4859700d551090ff79d450a9a/pytorch_bert_inference/1/main.log b/raw_results/2023-08-21_11:07:04_5c67682b169576c4859700d551090ff79d450a9a/pytorch_bert_inference/1/main.log deleted file mode 100644 index 
e038da69afe843dc6bcb0705f3fb520a80c8c1c9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_11:07:04_5c67682b169576c4859700d551090ff79d450a9a/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-21 12:58:32,006][benchmark][INFO] - Configuring inference benchmark -[2023-08-21 12:58:32,008][benchmark][INFO] - + Setting seed(42) -[2023-08-21 12:58:32,595][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-21 12:58:32,595][backend][INFO] - Configuring pytorch backend -[2023-08-21 12:58:32,595][backend][INFO] - + Checking initial device isolation -[2023-08-21 12:58:32,595][backend][INFO] - + Checking contineous device isolation -[2023-08-21 12:58:32,596][pytorch][INFO] - + Disabling gradients -[2023-08-21 12:58:32,596][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-21 12:58:32,719][pytorch][INFO] - + Turning on eval mode -[2023-08-21 12:58:32,720][inference][INFO] - Running inference benchmark -[2023-08-21 12:58:32,899][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-21 12:58:32,901][inference][INFO] - + Tracking forward pass peak memory -[2023-08-21 12:58:32,944][inference][INFO] - + Forward pass peak memory: 468.013056 (MB) -[2023-08-21 12:58:32,945][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-21 12:58:32,947][inference][INFO] - + Warming up the forward pass -[2023-08-21 12:58:32,991][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-21 12:58:38,030][inference][INFO] - + Forward pass latency: 4.23e-03 (s) -[2023-08-21 12:58:38,031][inference][INFO] - + Forward pass throughput: 946.00 (samples/s) -[2023-08-21 12:58:38,031][inference][INFO] - Saving inference results -[2023-08-21 12:58:38,039][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-21_11:07:04_5c67682b169576c4859700d551090ff79d450a9a/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-21_11:07:04_5c67682b169576c4859700d551090ff79d450a9a/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_11:07:04_5c67682b169576c4859700d551090ff79d450a9a/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_11:07:04_5c67682b169576c4859700d551090ff79d450a9a/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-21_11:07:04_5c67682b169576c4859700d551090ff79d450a9a/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index afdf9278df10a9659659e564e6b9cfb9eb6a2c3e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_11:07:04_5c67682b169576c4859700d551090ff79d450a9a/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-21_11:07:04_5c67682b169576c4859700d551090ff79d450a9a/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-21_11:07:04_5c67682b169576c4859700d551090ff79d450a9a/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-21_11:07:04_5c67682b169576c4859700d551090ff79d450a9a/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_11:07:04_5c67682b169576c4859700d551090ff79d450a9a/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-21_11:07:04_5c67682b169576c4859700d551090ff79d450a9a/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-21_11:07:04_5c67682b169576c4859700d551090ff79d450a9a/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_11:07:04_5c67682b169576c4859700d551090ff79d450a9a/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_11:07:04_5c67682b169576c4859700d551090ff79d450a9a/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-21_11:07:04_5c67682b169576c4859700d551090ff79d450a9a/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 4e05584c06fb1cf51e702f314d25b05cbc98e889..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_11:07:04_5c67682b169576c4859700d551090ff79d450a9a/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.42208,0.00386,259.0,0.486,206.0 diff --git a/raw_results/2023-08-21_11:07:04_5c67682b169576c4859700d551090ff79d450a9a/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-21_11:07:04_5c67682b169576c4859700d551090ff79d450a9a/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index f8c3c9e2f9c4d7ad9e6028a7a2ef98b1fe9319b5..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_11:07:04_5c67682b169576c4859700d551090ff79d450a9a/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-21 12:58:42,756][benchmark][INFO] - Configuring inference benchmark -[2023-08-21 12:58:42,756][benchmark][INFO] - + Setting seed(42) -[2023-08-21 12:58:44,166][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-21 12:58:44,166][backend][INFO] - Configuring pytorch backend -[2023-08-21 12:58:44,166][backend][INFO] - + Checking initial device isolation -[2023-08-21 12:58:44,166][backend][INFO] - + Checking contineous device isolation -[2023-08-21 12:58:44,167][pytorch][INFO] - + Disabling gradients -[2023-08-21 12:58:44,167][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-21 12:58:44,805][pytorch][INFO] - + Turning on eval mode -[2023-08-21 12:58:44,806][inference][INFO] - Running inference benchmark -[2023-08-21 12:58:44,997][inference][INFO] - + Tracking forward pass peak memory -[2023-08-21 12:58:45,046][inference][INFO] - + Forward pass peak memory: 469.42208 (MB) -[2023-08-21 12:58:45,048][inference][INFO] - + Warming up the forward pass 
-[2023-08-21 12:58:45,086][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-21 12:58:50,132][inference][INFO] - + Forward pass latency: 3.86e-03 (s) -[2023-08-21 12:58:50,133][inference][INFO] - + Forward pass throughput: 259.00 (samples/s) -[2023-08-21 12:58:50,134][inference][INFO] - + Warming up the generation pass -[2023-08-21 12:58:50,631][inference][INFO] - + Tracking generation latency and throughput -[2023-08-21 12:58:55,978][inference][INFO] - + Generation pass latency: 4.86e-01 (s) -[2023-08-21 12:58:55,979][inference][INFO] - + Generation pass throughput: 206.00 (tokens/s) -[2023-08-21 12:58:55,979][inference][INFO] - Saving inference results -[2023-08-21 12:58:55,997][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-21_11:09:34_e769ca3d287274143501b2803275367b2bff3e6a/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-21_11:09:34_e769ca3d287274143501b2803275367b2bff3e6a/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_11:09:34_e769ca3d287274143501b2803275367b2bff3e6a/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_11:09:34_e769ca3d287274143501b2803275367b2bff3e6a/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-21_11:09:34_e769ca3d287274143501b2803275367b2bff3e6a/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 3bfc5287d595f201d127617a97f49929e980f8fb..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_11:09:34_e769ca3d287274143501b2803275367b2bff3e6a/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-21_11:09:34_e769ca3d287274143501b2803275367b2bff3e6a/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-21_11:09:34_e769ca3d287274143501b2803275367b2bff3e6a/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-21_11:09:34_e769ca3d287274143501b2803275367b2bff3e6a/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_11:09:34_e769ca3d287274143501b2803275367b2bff3e6a/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-21_11:09:34_e769ca3d287274143501b2803275367b2bff3e6a/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-21_11:09:34_e769ca3d287274143501b2803275367b2bff3e6a/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_11:09:34_e769ca3d287274143501b2803275367b2bff3e6a/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_11:09:34_e769ca3d287274143501b2803275367b2bff3e6a/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-21_11:09:34_e769ca3d287274143501b2803275367b2bff3e6a/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index a9e2749b9418ad82dc5dd5f16605371541f041bc..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_11:09:34_e769ca3d287274143501b2803275367b2bff3e6a/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.214336,0.00367,272.0 diff --git a/raw_results/2023-08-21_11:09:34_e769ca3d287274143501b2803275367b2bff3e6a/pytorch_bert_inference/0/main.log b/raw_results/2023-08-21_11:09:34_e769ca3d287274143501b2803275367b2bff3e6a/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
98037d621b0a2dcdc806291d134bfa8b0af18fd6..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_11:09:34_e769ca3d287274143501b2803275367b2bff3e6a/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-21 13:00:00,497][benchmark][INFO] - Configuring inference benchmark -[2023-08-21 13:00:00,497][benchmark][INFO] - + Setting seed(42) -[2023-08-21 13:00:01,718][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-21 13:00:01,718][backend][INFO] - Configuring pytorch backend -[2023-08-21 13:00:01,718][backend][INFO] - + Checking initial device isolation -[2023-08-21 13:00:01,718][backend][INFO] - + Checking contineous device isolation -[2023-08-21 13:00:01,718][pytorch][INFO] - + Disabling gradients -[2023-08-21 13:00:01,718][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-21 13:00:02,682][pytorch][INFO] - + Turning on eval mode -[2023-08-21 13:00:02,682][inference][INFO] - Running inference benchmark -[2023-08-21 13:00:02,800][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-21 13:00:02,801][inference][INFO] - + Tracking forward pass peak memory -[2023-08-21 13:00:02,859][inference][INFO] - + Forward pass peak memory: 467.214336 (MB) -[2023-08-21 13:00:02,860][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-21 13:00:02,862][inference][INFO] - + Warming up the forward pass -[2023-08-21 13:00:02,903][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-21 13:00:07,953][inference][INFO] - + Forward pass latency: 3.67e-03 (s) -[2023-08-21 13:00:07,954][inference][INFO] - + Forward pass throughput: 272.00 (samples/s) -[2023-08-21 13:00:07,955][inference][INFO] - Saving inference results -[2023-08-21 13:00:07,971][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-21_11:09:34_e769ca3d287274143501b2803275367b2bff3e6a/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-21_11:09:34_e769ca3d287274143501b2803275367b2bff3e6a/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_11:09:34_e769ca3d287274143501b2803275367b2bff3e6a/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_11:09:34_e769ca3d287274143501b2803275367b2bff3e6a/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-21_11:09:34_e769ca3d287274143501b2803275367b2bff3e6a/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index aa58f026ca89f964db4b63bc937910d82e15f17f..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_11:09:34_e769ca3d287274143501b2803275367b2bff3e6a/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-21_11:09:34_e769ca3d287274143501b2803275367b2bff3e6a/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-21_11:09:34_e769ca3d287274143501b2803275367b2bff3e6a/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-21_11:09:34_e769ca3d287274143501b2803275367b2bff3e6a/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_11:09:34_e769ca3d287274143501b2803275367b2bff3e6a/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-21_11:09:34_e769ca3d287274143501b2803275367b2bff3e6a/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-21_11:09:34_e769ca3d287274143501b2803275367b2bff3e6a/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_11:09:34_e769ca3d287274143501b2803275367b2bff3e6a/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_11:09:34_e769ca3d287274143501b2803275367b2bff3e6a/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-21_11:09:34_e769ca3d287274143501b2803275367b2bff3e6a/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index f45bb717b94b8204d3ad88ae91d7b787d4b396f1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_11:09:34_e769ca3d287274143501b2803275367b2bff3e6a/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.226048,0.0041,976.0 diff --git a/raw_results/2023-08-21_11:09:34_e769ca3d287274143501b2803275367b2bff3e6a/pytorch_bert_inference/1/main.log b/raw_results/2023-08-21_11:09:34_e769ca3d287274143501b2803275367b2bff3e6a/pytorch_bert_inference/1/main.log deleted file mode 100644 index 5d213aa872b7712d51780fc6ccee8412f3a5f8c0..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_11:09:34_e769ca3d287274143501b2803275367b2bff3e6a/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-21 13:00:08,345][benchmark][INFO] - Configuring inference benchmark -[2023-08-21 13:00:08,346][benchmark][INFO] - + Setting seed(42) -[2023-08-21 13:00:08,787][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-21 13:00:08,788][backend][INFO] - Configuring pytorch backend -[2023-08-21 13:00:08,788][backend][INFO] - + Checking initial device isolation -[2023-08-21 13:00:08,788][backend][INFO] - + Checking contineous device isolation -[2023-08-21 13:00:08,789][pytorch][INFO] - + Disabling gradients -[2023-08-21 13:00:08,789][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-21 13:00:08,907][pytorch][INFO] - + Turning on eval mode -[2023-08-21 13:00:08,908][inference][INFO] - Running inference benchmark -[2023-08-21 13:00:09,030][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-21 13:00:09,032][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-21 13:00:09,074][inference][INFO] - + Forward pass peak memory: 468.226048 (MB) -[2023-08-21 13:00:09,075][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-21 13:00:09,077][inference][INFO] - + Warming up the forward pass -[2023-08-21 13:00:09,119][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-21 13:00:14,161][inference][INFO] - + Forward pass latency: 4.10e-03 (s) -[2023-08-21 13:00:14,162][inference][INFO] - + Forward pass throughput: 976.00 (samples/s) -[2023-08-21 13:00:14,162][inference][INFO] - Saving inference results -[2023-08-21 13:00:14,170][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-21_11:09:34_e769ca3d287274143501b2803275367b2bff3e6a/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-21_11:09:34_e769ca3d287274143501b2803275367b2bff3e6a/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_11:09:34_e769ca3d287274143501b2803275367b2bff3e6a/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_11:09:34_e769ca3d287274143501b2803275367b2bff3e6a/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-21_11:09:34_e769ca3d287274143501b2803275367b2bff3e6a/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 0910daa3abfbeb51f4eadfb78dc744be9764007e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_11:09:34_e769ca3d287274143501b2803275367b2bff3e6a/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-21_11:09:34_e769ca3d287274143501b2803275367b2bff3e6a/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-21_11:09:34_e769ca3d287274143501b2803275367b2bff3e6a/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-21_11:09:34_e769ca3d287274143501b2803275367b2bff3e6a/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_11:09:34_e769ca3d287274143501b2803275367b2bff3e6a/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-21_11:09:34_e769ca3d287274143501b2803275367b2bff3e6a/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-21_11:09:34_e769ca3d287274143501b2803275367b2bff3e6a/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_11:09:34_e769ca3d287274143501b2803275367b2bff3e6a/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_11:09:34_e769ca3d287274143501b2803275367b2bff3e6a/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-21_11:09:34_e769ca3d287274143501b2803275367b2bff3e6a/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index cee36027ca1eaed2a6ea2e119697f36399623e47..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_11:09:34_e769ca3d287274143501b2803275367b2bff3e6a/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,468.688896,0.00353,283.0,0.484,207.0 diff --git a/raw_results/2023-08-21_11:09:34_e769ca3d287274143501b2803275367b2bff3e6a/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-21_11:09:34_e769ca3d287274143501b2803275367b2bff3e6a/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 5ff05f3e9bb227a601fc783438155a9cd1979f0f..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-21_11:09:34_e769ca3d287274143501b2803275367b2bff3e6a/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-21 13:00:18,893][benchmark][INFO] - Configuring inference benchmark -[2023-08-21 13:00:18,894][benchmark][INFO] - + Setting seed(42) -[2023-08-21 13:00:20,306][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-21 13:00:20,307][backend][INFO] - Configuring pytorch backend -[2023-08-21 13:00:20,307][backend][INFO] - + Checking initial device isolation -[2023-08-21 13:00:20,307][backend][INFO] - + Checking contineous device isolation -[2023-08-21 13:00:20,307][pytorch][INFO] - + Disabling gradients -[2023-08-21 13:00:20,307][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-21 13:00:20,948][pytorch][INFO] - + Turning on eval mode -[2023-08-21 13:00:20,948][inference][INFO] - Running inference benchmark -[2023-08-21 13:00:21,149][inference][INFO] - + Tracking forward pass peak memory -[2023-08-21 13:00:21,197][inference][INFO] - + Forward pass peak memory: 468.688896 (MB) -[2023-08-21 13:00:21,198][inference][INFO] - + Warming up the forward pass -[2023-08-21 13:00:21,230][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-21 13:00:26,277][inference][INFO] - + Forward pass latency: 3.53e-03 (s) -[2023-08-21 13:00:26,278][inference][INFO] - + Forward pass throughput: 283.00 (samples/s) -[2023-08-21 13:00:26,279][inference][INFO] - + Warming up the generation pass -[2023-08-21 13:00:26,767][inference][INFO] - + Tracking generation latency and throughput -[2023-08-21 13:00:32,088][inference][INFO] - + Generation pass latency: 4.84e-01 (s) -[2023-08-21 13:00:32,090][inference][INFO] - + Generation pass throughput: 207.00 (tokens/s) -[2023-08-21 13:00:32,090][inference][INFO] - Saving inference results -[2023-08-21 13:00:32,106][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-21_11:40:04_2c1bcbf5ed2536566bcb52ffe37ff70278309205/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-21_11:40:04_2c1bcbf5ed2536566bcb52ffe37ff70278309205/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_11:40:04_2c1bcbf5ed2536566bcb52ffe37ff70278309205/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_11:40:04_2c1bcbf5ed2536566bcb52ffe37ff70278309205/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-21_11:40:04_2c1bcbf5ed2536566bcb52ffe37ff70278309205/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 1ba82d0add2b5934c14fec2f86be7d57d8156352..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_11:40:04_2c1bcbf5ed2536566bcb52ffe37ff70278309205/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
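Annotation: the `benchmark.input_shapes.batch_size: 1,4` entry under the basic sweeper in this `hydra.yaml` is what produces the numbered job subdirectories (`.../pytorch_bert_inference/0` and `/1`) seen throughout this diff. A minimal sketch of that expansion follows — illustrative only, not Hydra's actual `BasicSweeper` implementation:

```python
# Illustrative only: a minimal re-implementation of the expansion Hydra's
# BasicSweeper performs on the sweep spec above. The single swept key
# "benchmark.input_shapes.batch_size: 1,4" becomes two jobs, whose numbers
# (0 and 1) are the .../pytorch_bert_inference/0 and /1 subdirs in this diff.
from itertools import product

def expand_sweep(params: dict) -> list:
    """Expand {dotted.key: "v1,v2,..."} into one override list per job."""
    axes = [[(key, v) for v in spec.split(",")] for key, spec in params.items()]
    return [[f"{k}={v}" for k, v in combo] for combo in product(*axes)]

for job_num, overrides in enumerate(
    expand_sweep({"benchmark.input_shapes.batch_size": "1,4"})
):
    print(job_num, overrides)
# 0 ['benchmark.input_shapes.batch_size=1']
# 1 ['benchmark.input_shapes.batch_size=4']
```

The per-job override is exactly what the deleted `overrides.yaml` files record (`- benchmark.input_shapes.batch_size=1` and `=4`), and the job number is echoed in the `id`/`num` fields of each `hydra.yaml`.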
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-21_11:40:04_2c1bcbf5ed2536566bcb52ffe37ff70278309205/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-21_11:40:04_2c1bcbf5ed2536566bcb52ffe37ff70278309205/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-21_11:40:04_2c1bcbf5ed2536566bcb52ffe37ff70278309205/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_11:40:04_2c1bcbf5ed2536566bcb52ffe37ff70278309205/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-21_11:40:04_2c1bcbf5ed2536566bcb52ffe37ff70278309205/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-21_11:40:04_2c1bcbf5ed2536566bcb52ffe37ff70278309205/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_11:40:04_2c1bcbf5ed2536566bcb52ffe37ff70278309205/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_11:40:04_2c1bcbf5ed2536566bcb52ffe37ff70278309205/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-21_11:40:04_2c1bcbf5ed2536566bcb52ffe37ff70278309205/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 3f0a6efe62b99a8fb0f7bbba5a2577809e99c875..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_11:40:04_2c1bcbf5ed2536566bcb52ffe37ff70278309205/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.509824,0.0031,323.0 diff --git a/raw_results/2023-08-21_11:40:04_2c1bcbf5ed2536566bcb52ffe37ff70278309205/pytorch_bert_inference/0/main.log b/raw_results/2023-08-21_11:40:04_2c1bcbf5ed2536566bcb52ffe37ff70278309205/pytorch_bert_inference/0/main.log deleted file mode 100644 index e5ae85bb6d8d0668ab78b10674b308f42e009725..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_11:40:04_2c1bcbf5ed2536566bcb52ffe37ff70278309205/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-21 13:01:37,465][benchmark][INFO] - Configuring inference benchmark -[2023-08-21 13:01:37,466][benchmark][INFO] - + Setting seed(42) -[2023-08-21 13:01:38,778][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-21 13:01:38,779][backend][INFO] - Configuring pytorch backend -[2023-08-21 13:01:38,779][backend][INFO] - + Checking initial device isolation -[2023-08-21 13:01:38,779][backend][INFO] - + Checking contineous device isolation -[2023-08-21 13:01:38,779][pytorch][INFO] - + Disabling gradients -[2023-08-21 13:01:38,779][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-21 13:01:39,445][pytorch][INFO] - + Turning on eval mode -[2023-08-21 13:01:39,446][inference][INFO] - Running inference benchmark -[2023-08-21 13:01:39,565][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-21 13:01:39,567][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-21 13:01:39,625][inference][INFO] - + Forward pass peak memory: 466.509824 (MB) -[2023-08-21 13:01:39,626][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-21 13:01:39,628][inference][INFO] - + Warming up the forward pass -[2023-08-21 13:01:39,660][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-21 13:01:44,713][inference][INFO] - + Forward pass latency: 3.10e-03 (s) -[2023-08-21 13:01:44,714][inference][INFO] - + Forward pass throughput: 323.00 (samples/s) -[2023-08-21 13:01:44,714][inference][INFO] - Saving inference results -[2023-08-21 13:01:44,732][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-21_11:40:04_2c1bcbf5ed2536566bcb52ffe37ff70278309205/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-21_11:40:04_2c1bcbf5ed2536566bcb52ffe37ff70278309205/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_11:40:04_2c1bcbf5ed2536566bcb52ffe37ff70278309205/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_11:40:04_2c1bcbf5ed2536566bcb52ffe37ff70278309205/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-21_11:40:04_2c1bcbf5ed2536566bcb52ffe37ff70278309205/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index c52aaaabf9aebb6026a9f72102a9b563c00427c3..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_11:40:04_2c1bcbf5ed2536566bcb52ffe37ff70278309205/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-21_11:40:04_2c1bcbf5ed2536566bcb52ffe37ff70278309205/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-21_11:40:04_2c1bcbf5ed2536566bcb52ffe37ff70278309205/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-08-21_11:40:04_2c1bcbf5ed2536566bcb52ffe37ff70278309205/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_11:40:04_2c1bcbf5ed2536566bcb52ffe37ff70278309205/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-21_11:40:04_2c1bcbf5ed2536566bcb52ffe37ff70278309205/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-21_11:40:04_2c1bcbf5ed2536566bcb52ffe37ff70278309205/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_11:40:04_2c1bcbf5ed2536566bcb52ffe37ff70278309205/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_11:40:04_2c1bcbf5ed2536566bcb52ffe37ff70278309205/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-21_11:40:04_2c1bcbf5ed2536566bcb52ffe37ff70278309205/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 54ee62a2ad1bf290986a0a2a24056e3203cfa567..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_11:40:04_2c1bcbf5ed2536566bcb52ffe37ff70278309205/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.644416,0.00343,1170.0 diff --git a/raw_results/2023-08-21_11:40:04_2c1bcbf5ed2536566bcb52ffe37ff70278309205/pytorch_bert_inference/1/main.log b/raw_results/2023-08-21_11:40:04_2c1bcbf5ed2536566bcb52ffe37ff70278309205/pytorch_bert_inference/1/main.log deleted file mode 100644 index fcc75ea8cddb5e1a8e9beb7f5f6fc5573a472fa2..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-21_11:40:04_2c1bcbf5ed2536566bcb52ffe37ff70278309205/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-21 13:01:45,236][benchmark][INFO] - Configuring inference benchmark -[2023-08-21 13:01:45,237][benchmark][INFO] - + Setting seed(42) -[2023-08-21 13:01:45,678][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-21 13:01:45,678][backend][INFO] - Configuring pytorch backend -[2023-08-21 13:01:45,678][backend][INFO] - + Checking initial device isolation -[2023-08-21 13:01:45,678][backend][INFO] - + Checking contineous device isolation -[2023-08-21 13:01:45,678][pytorch][INFO] - + Disabling gradients -[2023-08-21 13:01:45,679][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-21 13:01:45,798][pytorch][INFO] - + Turning on eval mode -[2023-08-21 13:01:45,799][inference][INFO] - Running inference benchmark -[2023-08-21 13:01:45,921][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-21 13:01:45,923][inference][INFO] - + Tracking forward pass peak memory -[2023-08-21 13:01:45,966][inference][INFO] - + Forward pass peak memory: 467.644416 (MB) -[2023-08-21 13:01:45,967][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-21 13:01:45,969][inference][INFO] - + Warming up the forward pass -[2023-08-21 13:01:46,004][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-21 13:01:51,052][inference][INFO] - + Forward pass latency: 3.43e-03 (s) -[2023-08-21 13:01:51,053][inference][INFO] - + Forward pass throughput: 1170.00 (samples/s) -[2023-08-21 13:01:51,053][inference][INFO] - Saving inference results -[2023-08-21 13:01:51,061][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-21_11:40:04_2c1bcbf5ed2536566bcb52ffe37ff70278309205/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-21_11:40:04_2c1bcbf5ed2536566bcb52ffe37ff70278309205/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_11:40:04_2c1bcbf5ed2536566bcb52ffe37ff70278309205/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: 
hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_11:40:04_2c1bcbf5ed2536566bcb52ffe37ff70278309205/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-21_11:40:04_2c1bcbf5ed2536566bcb52ffe37ff70278309205/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 735b08d8573abc090295f3b680d983ee80677b6f..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_11:40:04_2c1bcbf5ed2536566bcb52ffe37ff70278309205/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
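Annotation: the run/sweep directory names recorded in these files (e.g. `sweeps/2023-08-21_11:40:04_2c1bcbf.../pytorch_gpt2_inference/0`) come from the two `${oc.env:...}` interpolations in `hydra.run.dir` / `hydra.sweep.dir`, resolved by OmegaConf's built-in `oc.env` resolver. The standalone usage below is a sketch; the env vars are presumably set by the surrounding CI, not by Hydra itself:

```python
# Sketch of how the sweep directory names recorded in this diff are derived:
# OmegaConf's built-in oc.env resolver substitutes the two environment
# variables named in the config above, then ${experiment_name} is resolved
# against the rest of the config.
import os
from omegaconf import OmegaConf

os.environ["COMMIT_DATE_GMT"] = "2023-08-21_11:40:04"
os.environ["COMMIT_SHA"] = "2c1bcbf5ed2536566bcb52ffe37ff70278309205"

cfg = OmegaConf.create(
    {
        "experiment_name": "pytorch_gpt2_inference",
        "sweep_dir": "sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}",
    }
)
print(cfg.sweep_dir)
# sweeps/2023-08-21_11:40:04_2c1bcbf5ed2536566bcb52ffe37ff70278309205/pytorch_gpt2_inference
```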
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-21_11:40:04_2c1bcbf5ed2536566bcb52ffe37ff70278309205/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-21_11:40:04_2c1bcbf5ed2536566bcb52ffe37ff70278309205/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-21_11:40:04_2c1bcbf5ed2536566bcb52ffe37ff70278309205/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_11:40:04_2c1bcbf5ed2536566bcb52ffe37ff70278309205/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-21_11:40:04_2c1bcbf5ed2536566bcb52ffe37ff70278309205/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-21_11:40:04_2c1bcbf5ed2536566bcb52ffe37ff70278309205/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_11:40:04_2c1bcbf5ed2536566bcb52ffe37ff70278309205/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_11:40:04_2c1bcbf5ed2536566bcb52ffe37ff70278309205/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-21_11:40:04_2c1bcbf5ed2536566bcb52ffe37ff70278309205/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index ea5aed2a498fb93117e41a349f3cb58ca954d9de..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_11:40:04_2c1bcbf5ed2536566bcb52ffe37ff70278309205/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,468.897792,0.00315,317.0,0.484,207.0 diff --git a/raw_results/2023-08-21_11:40:04_2c1bcbf5ed2536566bcb52ffe37ff70278309205/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-21_11:40:04_2c1bcbf5ed2536566bcb52ffe37ff70278309205/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 9213ced96eef9df885c5d640d7035045a5e3e064..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_11:40:04_2c1bcbf5ed2536566bcb52ffe37ff70278309205/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-21 13:01:55,955][benchmark][INFO] - Configuring inference benchmark -[2023-08-21 13:01:55,956][benchmark][INFO] - + Setting seed(42) -[2023-08-21 13:01:57,596][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-21 13:01:57,597][backend][INFO] - Configuring pytorch backend -[2023-08-21 13:01:57,597][backend][INFO] - + Checking initial device isolation -[2023-08-21 13:01:57,597][backend][INFO] - + Checking contineous device isolation -[2023-08-21 13:01:57,597][pytorch][INFO] - + Disabling gradients -[2023-08-21 13:01:57,597][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-21 13:01:58,239][pytorch][INFO] - + Turning on eval mode -[2023-08-21 13:01:58,240][inference][INFO] - Running inference benchmark -[2023-08-21 13:01:58,461][inference][INFO] - + Tracking forward pass peak memory -[2023-08-21 13:01:58,512][inference][INFO] - + Forward pass peak memory: 468.897792 (MB) -[2023-08-21 13:01:58,513][inference][INFO] - + Warming up the forward pass 
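Annotation: the figures in this run's `inference_results.csv` and `main.log` are internally consistent, assuming forward throughput is reported as batch_size / forward_latency (samples/s) and generation throughput as batch_size * new_tokens / generate_latency (tokens/s); that formula matches the recorded values up to rounding, though the exact rounding is the benchmark tool's own:

```python
# Consistency check of the numbers recorded above, under the assumption that
# throughput = items processed per second.
batch_size = 1        # benchmark.input_shapes.batch_size for this job
new_tokens = 100      # benchmark.input_shapes.new_tokens

forward_latency_s = 0.00315   # from inference_results.csv above
generate_latency_s = 0.484

print(round(batch_size / forward_latency_s))                # 317 samples/s
print(round(batch_size * new_tokens / generate_latency_s))  # 207 tokens/s
```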
-[2023-08-21 13:01:58,547][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-21 13:02:03,595][inference][INFO] - + Forward pass latency: 3.15e-03 (s) -[2023-08-21 13:02:03,597][inference][INFO] - + Forward pass throughput: 317.00 (samples/s) -[2023-08-21 13:02:03,597][inference][INFO] - + Warming up the generation pass -[2023-08-21 13:02:04,089][inference][INFO] - + Tracking generation latency and throughput -[2023-08-21 13:02:09,418][inference][INFO] - + Generation pass latency: 4.84e-01 (s) -[2023-08-21 13:02:09,419][inference][INFO] - + Generation pass throughput: 207.00 (tokens/s) -[2023-08-21 13:02:09,419][inference][INFO] - Saving inference results -[2023-08-21 13:02:09,437][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-21_12:33:43_2582bbde2ed3ee1b25c5886df35c07376ee930c4/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-21_12:33:43_2582bbde2ed3ee1b25c5886df35c07376ee930c4/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_12:33:43_2582bbde2ed3ee1b25c5886df35c07376ee930c4/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_12:33:43_2582bbde2ed3ee1b25c5886df35c07376ee930c4/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-21_12:33:43_2582bbde2ed3ee1b25c5886df35c07376ee930c4/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index fdb2a6dbe485698577198dfa96ac999d95b1f7aa..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_12:33:43_2582bbde2ed3ee1b25c5886df35c07376ee930c4/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-21_12:33:43_2582bbde2ed3ee1b25c5886df35c07376ee930c4/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-21_12:33:43_2582bbde2ed3ee1b25c5886df35c07376ee930c4/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-21_12:33:43_2582bbde2ed3ee1b25c5886df35c07376ee930c4/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_12:33:43_2582bbde2ed3ee1b25c5886df35c07376ee930c4/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-21_12:33:43_2582bbde2ed3ee1b25c5886df35c07376ee930c4/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-21_12:33:43_2582bbde2ed3ee1b25c5886df35c07376ee930c4/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_12:33:43_2582bbde2ed3ee1b25c5886df35c07376ee930c4/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_12:33:43_2582bbde2ed3ee1b25c5886df35c07376ee930c4/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-21_12:33:43_2582bbde2ed3ee1b25c5886df35c07376ee930c4/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 627f2ce7686d9e044faed1471e3320dc607ba01c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_12:33:43_2582bbde2ed3ee1b25c5886df35c07376ee930c4/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.804736,0.00319,313.0 diff --git a/raw_results/2023-08-21_12:33:43_2582bbde2ed3ee1b25c5886df35c07376ee930c4/pytorch_bert_inference/0/main.log b/raw_results/2023-08-21_12:33:43_2582bbde2ed3ee1b25c5886df35c07376ee930c4/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
efa4c538327c5ed22cf74707cad949888f3d9b62..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_12:33:43_2582bbde2ed3ee1b25c5886df35c07376ee930c4/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-21 13:03:12,620][benchmark][INFO] - Configuring inference benchmark -[2023-08-21 13:03:12,621][benchmark][INFO] - + Setting seed(42) -[2023-08-21 13:03:13,823][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-21 13:03:13,824][backend][INFO] - Configuring pytorch backend -[2023-08-21 13:03:13,824][backend][INFO] - + Checking initial device isolation -[2023-08-21 13:03:13,824][backend][INFO] - + Checking contineous device isolation -[2023-08-21 13:03:13,824][pytorch][INFO] - + Disabling gradients -[2023-08-21 13:03:13,825][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-21 13:03:14,425][pytorch][INFO] - + Turning on eval mode -[2023-08-21 13:03:14,425][inference][INFO] - Running inference benchmark -[2023-08-21 13:03:14,542][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-21 13:03:14,543][inference][INFO] - + Tracking forward pass peak memory -[2023-08-21 13:03:14,607][inference][INFO] - + Forward pass peak memory: 466.804736 (MB) -[2023-08-21 13:03:14,608][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-21 13:03:14,610][inference][INFO] - + Warming up the forward pass -[2023-08-21 13:03:14,647][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-21 13:03:19,698][inference][INFO] - + Forward pass latency: 3.19e-03 (s) -[2023-08-21 13:03:19,700][inference][INFO] - + Forward pass throughput: 313.00 (samples/s) -[2023-08-21 13:03:19,700][inference][INFO] - Saving inference results -[2023-08-21 13:03:19,714][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-21_12:33:43_2582bbde2ed3ee1b25c5886df35c07376ee930c4/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-21_12:33:43_2582bbde2ed3ee1b25c5886df35c07376ee930c4/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_12:33:43_2582bbde2ed3ee1b25c5886df35c07376ee930c4/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_12:33:43_2582bbde2ed3ee1b25c5886df35c07376ee930c4/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-21_12:33:43_2582bbde2ed3ee1b25c5886df35c07376ee930c4/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 912c954feb9e590a4e397c6778373fa747ed2f6d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_12:33:43_2582bbde2ed3ee1b25c5886df35c07376ee930c4/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-21_12:33:43_2582bbde2ed3ee1b25c5886df35c07376ee930c4/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-21_12:33:43_2582bbde2ed3ee1b25c5886df35c07376ee930c4/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-21_12:33:43_2582bbde2ed3ee1b25c5886df35c07376ee930c4/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_12:33:43_2582bbde2ed3ee1b25c5886df35c07376ee930c4/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-21_12:33:43_2582bbde2ed3ee1b25c5886df35c07376ee930c4/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-21_12:33:43_2582bbde2ed3ee1b25c5886df35c07376ee930c4/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_12:33:43_2582bbde2ed3ee1b25c5886df35c07376ee930c4/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_12:33:43_2582bbde2ed3ee1b25c5886df35c07376ee930c4/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-21_12:33:43_2582bbde2ed3ee1b25c5886df35c07376ee930c4/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 43645af3154db8fa193a8289a189360b93a2a2dc..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_12:33:43_2582bbde2ed3ee1b25c5886df35c07376ee930c4/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.90246399999995,0.00346,1160.0 diff --git a/raw_results/2023-08-21_12:33:43_2582bbde2ed3ee1b25c5886df35c07376ee930c4/pytorch_bert_inference/1/main.log b/raw_results/2023-08-21_12:33:43_2582bbde2ed3ee1b25c5886df35c07376ee930c4/pytorch_bert_inference/1/main.log deleted file mode 100644 index a539537e2b9840b27c68d65af6541e79821c50cf..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_12:33:43_2582bbde2ed3ee1b25c5886df35c07376ee930c4/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-21 13:03:20,428][benchmark][INFO] - Configuring inference benchmark -[2023-08-21 13:03:20,429][benchmark][INFO] - + Setting seed(42) -[2023-08-21 13:03:20,859][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-21 13:03:20,859][backend][INFO] - Configuring pytorch backend -[2023-08-21 13:03:20,860][backend][INFO] - + Checking initial device isolation -[2023-08-21 13:03:20,860][backend][INFO] - + Checking contineous device isolation -[2023-08-21 13:03:20,860][pytorch][INFO] - + Disabling gradients -[2023-08-21 13:03:20,860][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-21 13:03:20,971][pytorch][INFO] - + Turning on eval mode -[2023-08-21 13:03:20,972][inference][INFO] - Running inference benchmark -[2023-08-21 13:03:21,096][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-21 13:03:21,097][inference][INFO] - + Tracking forward 
pass peak memory -[2023-08-21 13:03:21,143][inference][INFO] - + Forward pass peak memory: 467.90246399999995 (MB) -[2023-08-21 13:03:21,145][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-21 13:03:21,146][inference][INFO] - + Warming up the forward pass -[2023-08-21 13:03:21,183][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-21 13:03:26,229][inference][INFO] - + Forward pass latency: 3.46e-03 (s) -[2023-08-21 13:03:26,230][inference][INFO] - + Forward pass throughput: 1160.00 (samples/s) -[2023-08-21 13:03:26,230][inference][INFO] - Saving inference results -[2023-08-21 13:03:26,239][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-21_12:33:43_2582bbde2ed3ee1b25c5886df35c07376ee930c4/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-21_12:33:43_2582bbde2ed3ee1b25c5886df35c07376ee930c4/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_12:33:43_2582bbde2ed3ee1b25c5886df35c07376ee930c4/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_12:33:43_2582bbde2ed3ee1b25c5886df35c07376ee930c4/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-21_12:33:43_2582bbde2ed3ee1b25c5886df35c07376ee930c4/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index c40709e5b603f480c248b5aa62cbea4ccb0f67cf..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_12:33:43_2582bbde2ed3ee1b25c5886df35c07376ee930c4/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - 
launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-21_12:33:43_2582bbde2ed3ee1b25c5886df35c07376ee930c4/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-21_12:33:43_2582bbde2ed3ee1b25c5886df35c07376ee930c4/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-21_12:33:43_2582bbde2ed3ee1b25c5886df35c07376ee930c4/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_12:33:43_2582bbde2ed3ee1b25c5886df35c07376ee930c4/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-21_12:33:43_2582bbde2ed3ee1b25c5886df35c07376ee930c4/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-21_12:33:43_2582bbde2ed3ee1b25c5886df35c07376ee930c4/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_12:33:43_2582bbde2ed3ee1b25c5886df35c07376ee930c4/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_12:33:43_2582bbde2ed3ee1b25c5886df35c07376ee930c4/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-21_12:33:43_2582bbde2ed3ee1b25c5886df35c07376ee930c4/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 0862731efc069022508699870afd1d32dccd3a09..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_12:33:43_2582bbde2ed3ee1b25c5886df35c07376ee930c4/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.037056,0.00375,267.0,0.571,175.0 diff --git a/raw_results/2023-08-21_12:33:43_2582bbde2ed3ee1b25c5886df35c07376ee930c4/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-21_12:33:43_2582bbde2ed3ee1b25c5886df35c07376ee930c4/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index b20edc86612b59d279f67555ce04d4b503a82b70..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-21_12:33:43_2582bbde2ed3ee1b25c5886df35c07376ee930c4/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-21 13:03:31,427][benchmark][INFO] - Configuring inference benchmark -[2023-08-21 13:03:31,429][benchmark][INFO] - + Setting seed(42) -[2023-08-21 13:03:32,815][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-21 13:03:32,815][backend][INFO] - Configuring pytorch backend -[2023-08-21 13:03:32,815][backend][INFO] - + Checking initial device isolation -[2023-08-21 13:03:32,815][backend][INFO] - + Checking contineous device isolation -[2023-08-21 13:03:32,816][pytorch][INFO] - + Disabling gradients -[2023-08-21 13:03:32,816][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-21 13:03:33,462][pytorch][INFO] - + Turning on eval mode -[2023-08-21 13:03:33,463][inference][INFO] - Running inference benchmark -[2023-08-21 13:03:33,657][inference][INFO] - + Tracking forward pass peak memory -[2023-08-21 13:03:33,706][inference][INFO] - + Forward pass peak memory: 469.037056 (MB) -[2023-08-21 13:03:33,708][inference][INFO] - + Warming up the forward pass -[2023-08-21 13:03:33,740][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-21 13:03:38,786][inference][INFO] - + Forward pass latency: 3.75e-03 (s) -[2023-08-21 13:03:38,788][inference][INFO] - + Forward pass throughput: 267.00 (samples/s) -[2023-08-21 13:03:38,788][inference][INFO] - + Warming up the generation pass -[2023-08-21 13:03:39,357][inference][INFO] - + Tracking generation latency and throughput -[2023-08-21 13:03:44,495][inference][INFO] - + Generation pass latency: 5.71e-01 (s) -[2023-08-21 13:03:44,496][inference][INFO] - + Generation pass throughput: 175.00 (tokens/s) -[2023-08-21 13:03:44,496][inference][INFO] - Saving inference results -[2023-08-21 13:03:44,512][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-21_13:20:44_2df24228d68872d79304b932a68cf56de3061f5b/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-21_13:20:44_2df24228d68872d79304b932a68cf56de3061f5b/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_13:20:44_2df24228d68872d79304b932a68cf56de3061f5b/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_13:20:44_2df24228d68872d79304b932a68cf56de3061f5b/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-21_13:20:44_2df24228d68872d79304b932a68cf56de3061f5b/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 2bacb73eac65d69b91a20cf353e9a538de245b41..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_13:20:44_2df24228d68872d79304b932a68cf56de3061f5b/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-21_13:20:44_2df24228d68872d79304b932a68cf56de3061f5b/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-21_13:20:44_2df24228d68872d79304b932a68cf56de3061f5b/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-21_13:20:44_2df24228d68872d79304b932a68cf56de3061f5b/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_13:20:44_2df24228d68872d79304b932a68cf56de3061f5b/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-21_13:20:44_2df24228d68872d79304b932a68cf56de3061f5b/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-21_13:20:44_2df24228d68872d79304b932a68cf56de3061f5b/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_13:20:44_2df24228d68872d79304b932a68cf56de3061f5b/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_13:20:44_2df24228d68872d79304b932a68cf56de3061f5b/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-21_13:20:44_2df24228d68872d79304b932a68cf56de3061f5b/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index a5a9c870490e765899f42701e84b6b0d047646a5..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_13:20:44_2df24228d68872d79304b932a68cf56de3061f5b/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.649088,0.00321,312.0 diff --git a/raw_results/2023-08-21_13:20:44_2df24228d68872d79304b932a68cf56de3061f5b/pytorch_bert_inference/0/main.log b/raw_results/2023-08-21_13:20:44_2df24228d68872d79304b932a68cf56de3061f5b/pytorch_bert_inference/0/main.log deleted file mode 100644 index 26c736a46e5c2e09ec62a2ebf48933073700d6fc..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_13:20:44_2df24228d68872d79304b932a68cf56de3061f5b/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-21 14:50:59,572][benchmark][INFO] - Configuring inference benchmark -[2023-08-21 14:50:59,573][benchmark][INFO] - + Setting seed(42) -[2023-08-21 14:51:00,822][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-21 14:51:00,822][backend][INFO] - Configuring pytorch backend -[2023-08-21 14:51:00,823][backend][INFO] - + Checking initial device isolation -[2023-08-21 14:51:00,823][backend][INFO] - + Checking contineous device isolation -[2023-08-21 14:51:00,823][pytorch][INFO] - + Disabling gradients -[2023-08-21 14:51:00,823][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-21 14:51:01,425][pytorch][INFO] - + Turning on eval mode -[2023-08-21 14:51:01,426][inference][INFO] - Running inference benchmark -[2023-08-21 14:51:01,543][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-21 14:51:01,544][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-21 14:51:01,601][inference][INFO] - + Forward pass peak memory: 466.649088 (MB) -[2023-08-21 14:51:01,602][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-21 14:51:01,603][inference][INFO] - + Warming up the forward pass -[2023-08-21 14:51:01,646][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-21 14:51:06,697][inference][INFO] - + Forward pass latency: 3.21e-03 (s) -[2023-08-21 14:51:06,699][inference][INFO] - + Forward pass throughput: 312.00 (samples/s) -[2023-08-21 14:51:06,699][inference][INFO] - Saving inference results -[2023-08-21 14:51:06,711][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-21_13:20:44_2df24228d68872d79304b932a68cf56de3061f5b/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-21_13:20:44_2df24228d68872d79304b932a68cf56de3061f5b/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_13:20:44_2df24228d68872d79304b932a68cf56de3061f5b/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_13:20:44_2df24228d68872d79304b932a68cf56de3061f5b/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-21_13:20:44_2df24228d68872d79304b932a68cf56de3061f5b/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 8eb2a220b7345bffdfc5ab96b922eecba7ddd148..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_13:20:44_2df24228d68872d79304b932a68cf56de3061f5b/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-21_13:20:44_2df24228d68872d79304b932a68cf56de3061f5b/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-21_13:20:44_2df24228d68872d79304b932a68cf56de3061f5b/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-08-21_13:20:44_2df24228d68872d79304b932a68cf56de3061f5b/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_13:20:44_2df24228d68872d79304b932a68cf56de3061f5b/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-21_13:20:44_2df24228d68872d79304b932a68cf56de3061f5b/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-21_13:20:44_2df24228d68872d79304b932a68cf56de3061f5b/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_13:20:44_2df24228d68872d79304b932a68cf56de3061f5b/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_13:20:44_2df24228d68872d79304b932a68cf56de3061f5b/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-21_13:20:44_2df24228d68872d79304b932a68cf56de3061f5b/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 3d2480451a11ef3486a2e48c868a66358b705c32..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_13:20:44_2df24228d68872d79304b932a68cf56de3061f5b/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.74271999999996,0.00355,1130.0 diff --git a/raw_results/2023-08-21_13:20:44_2df24228d68872d79304b932a68cf56de3061f5b/pytorch_bert_inference/1/main.log b/raw_results/2023-08-21_13:20:44_2df24228d68872d79304b932a68cf56de3061f5b/pytorch_bert_inference/1/main.log deleted file mode 100644 index 69cb3a290059ffd805944181e93ae5483d562b0c..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-21_13:20:44_2df24228d68872d79304b932a68cf56de3061f5b/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-21 14:51:07,084][benchmark][INFO] - Configuring inference benchmark -[2023-08-21 14:51:07,085][benchmark][INFO] - + Setting seed(42) -[2023-08-21 14:51:07,537][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-21 14:51:07,537][backend][INFO] - Configuring pytorch backend -[2023-08-21 14:51:07,537][backend][INFO] - + Checking initial device isolation -[2023-08-21 14:51:07,538][backend][INFO] - + Checking contineous device isolation -[2023-08-21 14:51:07,538][pytorch][INFO] - + Disabling gradients -[2023-08-21 14:51:07,538][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-21 14:51:07,653][pytorch][INFO] - + Turning on eval mode -[2023-08-21 14:51:07,653][inference][INFO] - Running inference benchmark -[2023-08-21 14:51:07,771][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-21 14:51:07,772][inference][INFO] - + Tracking forward pass peak memory -[2023-08-21 14:51:07,814][inference][INFO] - + Forward pass peak memory: 467.74271999999996 (MB) -[2023-08-21 14:51:07,815][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-21 14:51:07,817][inference][INFO] - + Warming up the forward pass -[2023-08-21 14:51:07,854][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-21 14:51:12,898][inference][INFO] - + Forward pass latency: 3.55e-03 (s) -[2023-08-21 14:51:12,899][inference][INFO] - + Forward pass throughput: 1130.00 (samples/s) -[2023-08-21 14:51:12,900][inference][INFO] - Saving inference results -[2023-08-21 14:51:12,907][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-21_13:20:44_2df24228d68872d79304b932a68cf56de3061f5b/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-21_13:20:44_2df24228d68872d79304b932a68cf56de3061f5b/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_13:20:44_2df24228d68872d79304b932a68cf56de3061f5b/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference 
-model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_13:20:44_2df24228d68872d79304b932a68cf56de3061f5b/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-21_13:20:44_2df24228d68872d79304b932a68cf56de3061f5b/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index a09898e375fb61c2988017369e47e8923415eedc..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_13:20:44_2df24228d68872d79304b932a68cf56de3061f5b/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-21_13:20:44_2df24228d68872d79304b932a68cf56de3061f5b/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-21_13:20:44_2df24228d68872d79304b932a68cf56de3061f5b/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-21_13:20:44_2df24228d68872d79304b932a68cf56de3061f5b/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_13:20:44_2df24228d68872d79304b932a68cf56de3061f5b/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-21_13:20:44_2df24228d68872d79304b932a68cf56de3061f5b/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-21_13:20:44_2df24228d68872d79304b932a68cf56de3061f5b/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_13:20:44_2df24228d68872d79304b932a68cf56de3061f5b/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_13:20:44_2df24228d68872d79304b932a68cf56de3061f5b/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-21_13:20:44_2df24228d68872d79304b932a68cf56de3061f5b/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index f9078c450ba53971cbe2de7726139b2a158ff15d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_13:20:44_2df24228d68872d79304b932a68cf56de3061f5b/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,468.8896,0.00387,258.0,0.505,198.0 diff --git a/raw_results/2023-08-21_13:20:44_2df24228d68872d79304b932a68cf56de3061f5b/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-21_13:20:44_2df24228d68872d79304b932a68cf56de3061f5b/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 6a52fc402b8ed779fa281499b7c51d8d6b41baec..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_13:20:44_2df24228d68872d79304b932a68cf56de3061f5b/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-21 14:51:17,677][benchmark][INFO] - Configuring inference benchmark -[2023-08-21 14:51:17,678][benchmark][INFO] - + Setting seed(42) -[2023-08-21 14:51:19,167][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-21 14:51:19,167][backend][INFO] - Configuring pytorch backend -[2023-08-21 14:51:19,167][backend][INFO] - + Checking initial device isolation -[2023-08-21 14:51:19,167][backend][INFO] - + Checking contineous device isolation -[2023-08-21 14:51:19,168][pytorch][INFO] - + Disabling gradients -[2023-08-21 14:51:19,168][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-21 14:51:19,789][pytorch][INFO] - + Turning on eval mode -[2023-08-21 14:51:19,790][inference][INFO] - Running inference benchmark -[2023-08-21 14:51:20,021][inference][INFO] - + Tracking forward pass peak memory -[2023-08-21 14:51:20,071][inference][INFO] - + Forward pass peak memory: 468.8896 (MB) -[2023-08-21 14:51:20,072][inference][INFO] - + Warming up the forward pass 
-[2023-08-21 14:51:20,106][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-21 14:51:25,149][inference][INFO] - + Forward pass latency: 3.87e-03 (s) -[2023-08-21 14:51:25,150][inference][INFO] - + Forward pass throughput: 258.00 (samples/s) -[2023-08-21 14:51:25,151][inference][INFO] - + Warming up the generation pass -[2023-08-21 14:51:25,741][inference][INFO] - + Tracking generation latency and throughput -[2023-08-21 14:51:31,293][inference][INFO] - + Generation pass latency: 5.05e-01 (s) -[2023-08-21 14:51:31,294][inference][INFO] - + Generation pass throughput: 198.00 (tokens/s) -[2023-08-21 14:51:31,294][inference][INFO] - Saving inference results -[2023-08-21 14:51:31,306][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-21_14:20:05_8608bf2049a10f8d23043e1bb196707a1c1b3fe5/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-21_14:20:05_8608bf2049a10f8d23043e1bb196707a1c1b3fe5/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_14:20:05_8608bf2049a10f8d23043e1bb196707a1c1b3fe5/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_14:20:05_8608bf2049a10f8d23043e1bb196707a1c1b3fe5/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-21_14:20:05_8608bf2049a10f8d23043e1bb196707a1c1b3fe5/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index f3a6ccd8c72d6254183050bde8d068cda720d8d2..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_14:20:05_8608bf2049a10f8d23043e1bb196707a1c1b3fe5/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-21_14:20:05_8608bf2049a10f8d23043e1bb196707a1c1b3fe5/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-21_14:20:05_8608bf2049a10f8d23043e1bb196707a1c1b3fe5/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-21_14:20:05_8608bf2049a10f8d23043e1bb196707a1c1b3fe5/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_14:20:05_8608bf2049a10f8d23043e1bb196707a1c1b3fe5/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-21_14:20:05_8608bf2049a10f8d23043e1bb196707a1c1b3fe5/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-21_14:20:05_8608bf2049a10f8d23043e1bb196707a1c1b3fe5/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_14:20:05_8608bf2049a10f8d23043e1bb196707a1c1b3fe5/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_14:20:05_8608bf2049a10f8d23043e1bb196707a1c1b3fe5/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-21_14:20:05_8608bf2049a10f8d23043e1bb196707a1c1b3fe5/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 75a951b9325bd02b40f49d55103188de816513c4..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_14:20:05_8608bf2049a10f8d23043e1bb196707a1c1b3fe5/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.526208,0.00316,316.0 diff --git a/raw_results/2023-08-21_14:20:05_8608bf2049a10f8d23043e1bb196707a1c1b3fe5/pytorch_bert_inference/0/main.log b/raw_results/2023-08-21_14:20:05_8608bf2049a10f8d23043e1bb196707a1c1b3fe5/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
ed7d514b728ae52dfef82cef30b86e0646f6a5a8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_14:20:05_8608bf2049a10f8d23043e1bb196707a1c1b3fe5/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-21 14:52:35,173][benchmark][INFO] - Configuring inference benchmark -[2023-08-21 14:52:35,174][benchmark][INFO] - + Setting seed(42) -[2023-08-21 14:52:36,380][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-21 14:52:36,381][backend][INFO] - Configuring pytorch backend -[2023-08-21 14:52:36,381][backend][INFO] - + Checking initial device isolation -[2023-08-21 14:52:36,381][backend][INFO] - + Checking contineous device isolation -[2023-08-21 14:52:36,381][pytorch][INFO] - + Disabling gradients -[2023-08-21 14:52:36,381][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-21 14:52:36,992][pytorch][INFO] - + Turning on eval mode -[2023-08-21 14:52:36,993][inference][INFO] - Running inference benchmark -[2023-08-21 14:52:37,133][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-21 14:52:37,135][inference][INFO] - + Tracking forward pass peak memory -[2023-08-21 14:52:37,194][inference][INFO] - + Forward pass peak memory: 466.526208 (MB) -[2023-08-21 14:52:37,196][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-21 14:52:37,197][inference][INFO] - + Warming up the forward pass -[2023-08-21 14:52:37,230][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-21 14:52:42,280][inference][INFO] - + Forward pass latency: 3.16e-03 (s) -[2023-08-21 14:52:42,282][inference][INFO] - + Forward pass throughput: 316.00 (samples/s) -[2023-08-21 14:52:42,282][inference][INFO] - Saving inference results -[2023-08-21 14:52:42,293][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-21_14:20:05_8608bf2049a10f8d23043e1bb196707a1c1b3fe5/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-21_14:20:05_8608bf2049a10f8d23043e1bb196707a1c1b3fe5/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_14:20:05_8608bf2049a10f8d23043e1bb196707a1c1b3fe5/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_14:20:05_8608bf2049a10f8d23043e1bb196707a1c1b3fe5/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-21_14:20:05_8608bf2049a10f8d23043e1bb196707a1c1b3fe5/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 591cf7869b727f898ad04914c76f08d1f89c4225..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_14:20:05_8608bf2049a10f8d23043e1bb196707a1c1b3fe5/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
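These hydra.yaml snapshots record MULTIRUN launches in which the basic sweeper expands `benchmark.input_shapes.batch_size: 1,4` into jobs 0 and 1, each written under `sweeps/${COMMIT_DATE_GMT}_${COMMIT_SHA}/<experiment_name>/<job num>`, with the date and SHA resolved from environment variables via the `oc.env` resolver. A minimal entry point consistent with these snapshots might look as follows; the module layout and config discovery are assumptions, not the project's actual code.

```python
# Illustrative entry point consistent with the snapshots above; the real
# optimum-benchmark main module may differ (the runtime section shows configs
# being supplied from /home/user/transformers-regression/configs).
import hydra
from omegaconf import DictConfig


@hydra.main(version_base="1.3", config_path="configs", config_name="bert_cpu_inference")
def main(cfg: DictConfig) -> None:
    # cfg.benchmark.input_shapes.batch_size is 1 for job 0 and 4 for job 1
    ...


if __name__ == "__main__":
    # Presumed invocation, with the env vars the sweep dir interpolates:
    #   COMMIT_DATE_GMT=... COMMIT_SHA=... python main.py -m \
    #       benchmark.input_shapes.batch_size=1,4
    main()
```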
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-21_14:20:05_8608bf2049a10f8d23043e1bb196707a1c1b3fe5/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-21_14:20:05_8608bf2049a10f8d23043e1bb196707a1c1b3fe5/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-21_14:20:05_8608bf2049a10f8d23043e1bb196707a1c1b3fe5/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_14:20:05_8608bf2049a10f8d23043e1bb196707a1c1b3fe5/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-21_14:20:05_8608bf2049a10f8d23043e1bb196707a1c1b3fe5/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-21_14:20:05_8608bf2049a10f8d23043e1bb196707a1c1b3fe5/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_14:20:05_8608bf2049a10f8d23043e1bb196707a1c1b3fe5/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
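Both thread knobs in the backend config (`inter_op_num_threads`, `intra_op_num_threads`) are null throughout this diff, leaving PyTorch's defaults in place. When they are set, the natural reading is a pair of torch calls like the sketch below; where exactly the backend applies them is an assumption.

```python
# Presumed effect of the thread settings when they are not null; the exact
# call site inside the PyTorch backend is an assumption.
import torch


def apply_thread_settings(inter_op_num_threads=None, intra_op_num_threads=None):
    if intra_op_num_threads is not None:
        torch.set_num_threads(intra_op_num_threads)          # intra-op pool
    if inter_op_num_threads is not None:
        # must run before any inter-op parallel work has started
        torch.set_num_interop_threads(inter_op_num_threads)  # inter-op pool
```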
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_14:20:05_8608bf2049a10f8d23043e1bb196707a1c1b3fe5/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-21_14:20:05_8608bf2049a10f8d23043e1bb196707a1c1b3fe5/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index d9868f328bc194667f536c93e86b500c097442f1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_14:20:05_8608bf2049a10f8d23043e1bb196707a1c1b3fe5/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.62803199999996,0.00353,1130.0 diff --git a/raw_results/2023-08-21_14:20:05_8608bf2049a10f8d23043e1bb196707a1c1b3fe5/pytorch_bert_inference/1/main.log b/raw_results/2023-08-21_14:20:05_8608bf2049a10f8d23043e1bb196707a1c1b3fe5/pytorch_bert_inference/1/main.log deleted file mode 100644 index f63f7825e92543566a904df6e4652484e9973755..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_14:20:05_8608bf2049a10f8d23043e1bb196707a1c1b3fe5/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-21 14:52:42,678][benchmark][INFO] - Configuring inference benchmark -[2023-08-21 14:52:42,679][benchmark][INFO] - + Setting seed(42) -[2023-08-21 14:52:43,111][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-21 14:52:43,111][backend][INFO] - Configuring pytorch backend -[2023-08-21 14:52:43,111][backend][INFO] - + Checking initial device isolation -[2023-08-21 14:52:43,112][backend][INFO] - + Checking contineous device isolation -[2023-08-21 14:52:43,112][pytorch][INFO] - + Disabling gradients -[2023-08-21 14:52:43,112][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-21 14:52:43,226][pytorch][INFO] - + Turning on eval mode -[2023-08-21 14:52:43,227][inference][INFO] - Running inference benchmark -[2023-08-21 14:52:43,345][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-21 14:52:43,346][inference][INFO] - + Tracking forward 
pass peak memory -[2023-08-21 14:52:43,391][inference][INFO] - + Forward pass peak memory: 467.62803199999996 (MB) -[2023-08-21 14:52:43,392][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-21 14:52:43,393][inference][INFO] - + Warming up the forward pass -[2023-08-21 14:52:43,435][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-21 14:52:48,482][inference][INFO] - + Forward pass latency: 3.53e-03 (s) -[2023-08-21 14:52:48,483][inference][INFO] - + Forward pass throughput: 1130.00 (samples/s) -[2023-08-21 14:52:48,483][inference][INFO] - Saving inference results -[2023-08-21 14:52:48,493][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-21_14:20:05_8608bf2049a10f8d23043e1bb196707a1c1b3fe5/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-21_14:20:05_8608bf2049a10f8d23043e1bb196707a1c1b3fe5/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_14:20:05_8608bf2049a10f8d23043e1bb196707a1c1b3fe5/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_14:20:05_8608bf2049a10f8d23043e1bb196707a1c1b3fe5/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-21_14:20:05_8608bf2049a10f8d23043e1bb196707a1c1b3fe5/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 7c07115aaf7bc662b2aa942e38ee5cefa412be5b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_14:20:05_8608bf2049a10f8d23043e1bb196707a1c1b3fe5/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - 
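Each job writes a one-row inference_results.csv (also deleted in this diff), which is straightforward to inspect, for example with pandas; the path below is just one of the files removed above.

```python
# Minimal sketch for inspecting a job's results file; the path is one of the
# files deleted in this diff and is shown only as an example.
import pandas as pd

df = pd.read_csv(
    "raw_results/2023-08-21_14:20:05_8608bf2049a10f8d23043e1bb196707a1c1b3fe5/"
    "pytorch_bert_inference/1/inference_results.csv",
    index_col=0,
)
print(df["forward.latency(s)"].iloc[0])              # 0.00353
print(df["forward.throughput(samples/s)"].iloc[0])   # 1130.0
```

The columns are consistent with throughput = batch_size / latency: for this batch-size-4 BERT job, 4 / 0.00353 s ≈ 1133 samples/s, in line with the recorded 1130 once latency rounding is accounted for.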
launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-21_14:20:05_8608bf2049a10f8d23043e1bb196707a1c1b3fe5/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-21_14:20:05_8608bf2049a10f8d23043e1bb196707a1c1b3fe5/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-21_14:20:05_8608bf2049a10f8d23043e1bb196707a1c1b3fe5/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_14:20:05_8608bf2049a10f8d23043e1bb196707a1c1b3fe5/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-21_14:20:05_8608bf2049a10f8d23043e1bb196707a1c1b3fe5/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-21_14:20:05_8608bf2049a10f8d23043e1bb196707a1c1b3fe5/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_14:20:05_8608bf2049a10f8d23043e1bb196707a1c1b3fe5/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_14:20:05_8608bf2049a10f8d23043e1bb196707a1c1b3fe5/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-21_14:20:05_8608bf2049a10f8d23043e1bb196707a1c1b3fe5/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 9841eedac47552f8a6e1b560ba0e14be45002621..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_14:20:05_8608bf2049a10f8d23043e1bb196707a1c1b3fe5/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,468.8896,0.00387,258.0,0.482,207.0 diff --git a/raw_results/2023-08-21_14:20:05_8608bf2049a10f8d23043e1bb196707a1c1b3fe5/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-21_14:20:05_8608bf2049a10f8d23043e1bb196707a1c1b3fe5/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 9a45caa0c31491ac7b69d6eb2f40411daa2907d8..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-21_14:20:05_8608bf2049a10f8d23043e1bb196707a1c1b3fe5/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-21 14:52:53,308][benchmark][INFO] - Configuring inference benchmark -[2023-08-21 14:52:53,309][benchmark][INFO] - + Setting seed(42) -[2023-08-21 14:52:54,711][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-21 14:52:54,711][backend][INFO] - Configuring pytorch backend -[2023-08-21 14:52:54,712][backend][INFO] - + Checking initial device isolation -[2023-08-21 14:52:54,712][backend][INFO] - + Checking contineous device isolation -[2023-08-21 14:52:54,712][pytorch][INFO] - + Disabling gradients -[2023-08-21 14:52:54,712][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-21 14:52:55,348][pytorch][INFO] - + Turning on eval mode -[2023-08-21 14:52:55,349][inference][INFO] - Running inference benchmark -[2023-08-21 14:52:55,547][inference][INFO] - + Tracking forward pass peak memory -[2023-08-21 14:52:55,597][inference][INFO] - + Forward pass peak memory: 468.8896 (MB) -[2023-08-21 14:52:55,598][inference][INFO] - + Warming up the forward pass -[2023-08-21 14:52:55,631][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-21 14:53:00,676][inference][INFO] - + Forward pass latency: 3.87e-03 (s) -[2023-08-21 14:53:00,678][inference][INFO] - + Forward pass throughput: 258.00 (samples/s) -[2023-08-21 14:53:00,678][inference][INFO] - + Warming up the generation pass -[2023-08-21 14:53:01,169][inference][INFO] - + Tracking generation latency and throughput -[2023-08-21 14:53:06,476][inference][INFO] - + Generation pass latency: 4.82e-01 (s) -[2023-08-21 14:53:06,477][inference][INFO] - + Generation pass throughput: 207.00 (tokens/s) -[2023-08-21 14:53:06,477][inference][INFO] - Saving inference results -[2023-08-21 14:53:06,489][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-21_15:23:17_6f041fcbb853adc6c37da85515384ed9a9c5b181/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-21_15:23:17_6f041fcbb853adc6c37da85515384ed9a9c5b181/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_15:23:17_6f041fcbb853adc6c37da85515384ed9a9c5b181/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 
16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_15:23:17_6f041fcbb853adc6c37da85515384ed9a9c5b181/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-21_15:23:17_6f041fcbb853adc6c37da85515384ed9a9c5b181/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index a5e881791ad80bbc2eeff368b9f94e789103f0c0..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_15:23:17_6f041fcbb853adc6c37da85515384ed9a9c5b181/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
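Note the difference between the two YAML snapshots kept per job: .config/config.yaml preserves the unresolved interpolation `${is_inference:${benchmark.name}}`, while hydra_config.yaml stores the resolved value `true`. That interpolation requires a custom OmegaConf resolver registered before config composition; one plausible registration is shown below, though the project's actual definition may differ.

```python
# One plausible registration for the custom resolver used in the configs
# above; the actual definition inside optimum-benchmark may differ.
from omegaconf import OmegaConf

OmegaConf.register_new_resolver(
    "is_inference", lambda benchmark_name: benchmark_name == "inference"
)
```

With `benchmark.name` equal to `inference`, both `disable_grad` and `eval_mode` then resolve to `true`, exactly as the hydra_config.yaml snapshots show.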
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-21_15:23:17_6f041fcbb853adc6c37da85515384ed9a9c5b181/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-21_15:23:17_6f041fcbb853adc6c37da85515384ed9a9c5b181/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-21_15:23:17_6f041fcbb853adc6c37da85515384ed9a9c5b181/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_15:23:17_6f041fcbb853adc6c37da85515384ed9a9c5b181/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-21_15:23:17_6f041fcbb853adc6c37da85515384ed9a9c5b181/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-21_15:23:17_6f041fcbb853adc6c37da85515384ed9a9c5b181/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_15:23:17_6f041fcbb853adc6c37da85515384ed9a9c5b181/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_15:23:17_6f041fcbb853adc6c37da85515384ed9a9c5b181/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-21_15:23:17_6f041fcbb853adc6c37da85515384ed9a9c5b181/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index e0c5a7f06aa2e99d50047393927f1a5663c7b6f0..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_15:23:17_6f041fcbb853adc6c37da85515384ed9a9c5b181/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.73862399999996,0.0032,312.0 diff --git a/raw_results/2023-08-21_15:23:17_6f041fcbb853adc6c37da85515384ed9a9c5b181/pytorch_bert_inference/0/main.log b/raw_results/2023-08-21_15:23:17_6f041fcbb853adc6c37da85515384ed9a9c5b181/pytorch_bert_inference/0/main.log deleted file mode 100644 index be2718bda919eb39ecaeb9a25063e55c7af9069c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_15:23:17_6f041fcbb853adc6c37da85515384ed9a9c5b181/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-21 16:49:46,372][benchmark][INFO] - Configuring inference benchmark -[2023-08-21 16:49:46,373][benchmark][INFO] - + Setting seed(42) -[2023-08-21 16:49:47,607][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-21 16:49:47,608][backend][INFO] - Configuring pytorch backend -[2023-08-21 16:49:47,608][backend][INFO] - + Checking initial device isolation -[2023-08-21 16:49:47,608][backend][INFO] - + Checking contineous device isolation -[2023-08-21 16:49:47,608][pytorch][INFO] - + Disabling gradients -[2023-08-21 16:49:47,608][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-21 16:49:48,253][pytorch][INFO] - + Turning on eval mode -[2023-08-21 16:49:48,254][inference][INFO] - Running inference benchmark -[2023-08-21 16:49:48,371][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-21 16:49:48,372][inference][INFO] - + Tracking forward 
pass peak memory -[2023-08-21 16:49:48,436][inference][INFO] - + Forward pass peak memory: 467.73862399999996 (MB) -[2023-08-21 16:49:48,438][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-21 16:49:48,439][inference][INFO] - + Warming up the forward pass -[2023-08-21 16:49:48,476][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-21 16:49:53,528][inference][INFO] - + Forward pass latency: 3.20e-03 (s) -[2023-08-21 16:49:53,529][inference][INFO] - + Forward pass throughput: 312.00 (samples/s) -[2023-08-21 16:49:53,529][inference][INFO] - Saving inference results -[2023-08-21 16:49:53,539][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-21_15:23:17_6f041fcbb853adc6c37da85515384ed9a9c5b181/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-21_15:23:17_6f041fcbb853adc6c37da85515384ed9a9c5b181/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_15:23:17_6f041fcbb853adc6c37da85515384ed9a9c5b181/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_15:23:17_6f041fcbb853adc6c37da85515384ed9a9c5b181/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-21_15:23:17_6f041fcbb853adc6c37da85515384ed9a9c5b181/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 30a00c18b4241137da86ab4358cccdc6d0e7d7f3..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_15:23:17_6f041fcbb853adc6c37da85515384ed9a9c5b181/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} 
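The "Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids']" log lines indicate inputs synthesized from `benchmark.input_shapes`; only the text-relevant shapes (`batch_size`, `sequence_length`) matter for these tasks, while the vision and audio keys (`width`, `height`, `feature_size`, ...) go unused. An illustrative generator, with an arbitrary assumed vocabulary size:

```python
# Illustrative dummy-input generator matching the logged input names; shapes
# come from benchmark.input_shapes, the vocabulary size is an assumption.
import torch


def dummy_text_inputs(batch_size: int, sequence_length: int, vocab_size: int = 100):
    shape = (batch_size, sequence_length)
    return {
        "input_ids": torch.randint(0, vocab_size, shape),
        "attention_mask": torch.ones(shape, dtype=torch.long),
        "token_type_ids": torch.zeros(shape, dtype=torch.long),
    }
```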
- launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-21_15:23:17_6f041fcbb853adc6c37da85515384ed9a9c5b181/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-21_15:23:17_6f041fcbb853adc6c37da85515384ed9a9c5b181/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-21_15:23:17_6f041fcbb853adc6c37da85515384ed9a9c5b181/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_15:23:17_6f041fcbb853adc6c37da85515384ed9a9c5b181/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-21_15:23:17_6f041fcbb853adc6c37da85515384ed9a9c5b181/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-21_15:23:17_6f041fcbb853adc6c37da85515384ed9a9c5b181/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_15:23:17_6f041fcbb853adc6c37da85515384ed9a9c5b181/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_15:23:17_6f041fcbb853adc6c37da85515384ed9a9c5b181/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-21_15:23:17_6f041fcbb853adc6c37da85515384ed9a9c5b181/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index cb2f51fddefa294b5b0d5626439542eb0ad5ff61..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_15:23:17_6f041fcbb853adc6c37da85515384ed9a9c5b181/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.832256,0.00349,1150.0 diff --git a/raw_results/2023-08-21_15:23:17_6f041fcbb853adc6c37da85515384ed9a9c5b181/pytorch_bert_inference/1/main.log b/raw_results/2023-08-21_15:23:17_6f041fcbb853adc6c37da85515384ed9a9c5b181/pytorch_bert_inference/1/main.log deleted file mode 100644 index 
66a78f727f79da5cc7569a0daba3e5c4eb295714..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_15:23:17_6f041fcbb853adc6c37da85515384ed9a9c5b181/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-21 16:49:53,957][benchmark][INFO] - Configuring inference benchmark -[2023-08-21 16:49:53,958][benchmark][INFO] - + Setting seed(42) -[2023-08-21 16:49:54,410][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-21 16:49:54,410][backend][INFO] - Configuring pytorch backend -[2023-08-21 16:49:54,410][backend][INFO] - + Checking initial device isolation -[2023-08-21 16:49:54,410][backend][INFO] - + Checking contineous device isolation -[2023-08-21 16:49:54,411][pytorch][INFO] - + Disabling gradients -[2023-08-21 16:49:54,411][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-21 16:49:54,520][pytorch][INFO] - + Turning on eval mode -[2023-08-21 16:49:54,521][inference][INFO] - Running inference benchmark -[2023-08-21 16:49:54,649][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-21 16:49:54,650][inference][INFO] - + Tracking forward pass peak memory -[2023-08-21 16:49:54,693][inference][INFO] - + Forward pass peak memory: 468.832256 (MB) -[2023-08-21 16:49:54,694][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-21 16:49:54,695][inference][INFO] - + Warming up the forward pass -[2023-08-21 16:49:54,737][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-21 16:49:59,784][inference][INFO] - + Forward pass latency: 3.49e-03 (s) -[2023-08-21 16:49:59,786][inference][INFO] - + Forward pass throughput: 1150.00 (samples/s) -[2023-08-21 16:49:59,786][inference][INFO] - Saving inference results -[2023-08-21 16:49:59,794][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-21_15:23:17_6f041fcbb853adc6c37da85515384ed9a9c5b181/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-21_15:23:17_6f041fcbb853adc6c37da85515384ed9a9c5b181/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_15:23:17_6f041fcbb853adc6c37da85515384ed9a9c5b181/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_15:23:17_6f041fcbb853adc6c37da85515384ed9a9c5b181/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-21_15:23:17_6f041fcbb853adc6c37da85515384ed9a9c5b181/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 593ec799d20af76d6792de62d2a1ab2318c73954..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_15:23:17_6f041fcbb853adc6c37da85515384ed9a9c5b181/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
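The run and sweep directories in the hydra.yaml above are built from two environment variables plus the experiment name, which is exactly how the raw_results/<date>_<sha>/<experiment> folder names throughout this diff come about. A minimal OmegaConf sketch of that resolution (the exported values below are taken from the directory names above; nothing else is from the source):

import os
from omegaconf import OmegaConf

# Values as the CI job would export them (copied from the paths above).
os.environ["COMMIT_DATE_GMT"] = "2023-08-21_15:23:17"
os.environ["COMMIT_SHA"] = "6f041fcbb853adc6c37da85515384ed9a9c5b181"

cfg = OmegaConf.create({
    "experiment_name": "pytorch_gpt2_inference",
    "sweep_dir": "sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}",
})
print(cfg.sweep_dir)
# sweeps/2023-08-21_15:23:17_6f041fcbb853adc6c37da85515384ed9a9c5b181/pytorch_gpt2_inference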
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-21_15:23:17_6f041fcbb853adc6c37da85515384ed9a9c5b181/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-21_15:23:17_6f041fcbb853adc6c37da85515384ed9a9c5b181/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-21_15:23:17_6f041fcbb853adc6c37da85515384ed9a9c5b181/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_15:23:17_6f041fcbb853adc6c37da85515384ed9a9c5b181/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-21_15:23:17_6f041fcbb853adc6c37da85515384ed9a9c5b181/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-21_15:23:17_6f041fcbb853adc6c37da85515384ed9a9c5b181/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_15:23:17_6f041fcbb853adc6c37da85515384ed9a9c5b181/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_15:23:17_6f041fcbb853adc6c37da85515384ed9a9c5b181/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-21_15:23:17_6f041fcbb853adc6c37da85515384ed9a9c5b181/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 6b2cc2ee20e6b9dc83be359b5938a5103a24b281..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_15:23:17_6f041fcbb853adc6c37da85515384ed9a9c5b181/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.04934399999996,0.00381,262.0,0.543,184.0 diff --git a/raw_results/2023-08-21_15:23:17_6f041fcbb853adc6c37da85515384ed9a9c5b181/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-21_15:23:17_6f041fcbb853adc6c37da85515384ed9a9c5b181/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 86d883e3d8c22bc0d3076c4c4825b2263db73e47..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_15:23:17_6f041fcbb853adc6c37da85515384ed9a9c5b181/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-21 16:50:04,514][benchmark][INFO] - Configuring inference benchmark -[2023-08-21 16:50:04,515][benchmark][INFO] - + Setting seed(42) -[2023-08-21 16:50:05,897][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-21 16:50:05,898][backend][INFO] - Configuring pytorch backend -[2023-08-21 16:50:05,898][backend][INFO] - + Checking initial device isolation -[2023-08-21 16:50:05,898][backend][INFO] - + Checking contineous device isolation -[2023-08-21 16:50:05,898][pytorch][INFO] - + Disabling gradients -[2023-08-21 16:50:05,899][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-21 16:50:06,542][pytorch][INFO] - + Turning on eval mode -[2023-08-21 16:50:06,543][inference][INFO] - Running inference benchmark -[2023-08-21 16:50:06,874][inference][INFO] - + Tracking forward pass peak memory -[2023-08-21 16:50:06,924][inference][INFO] - + Forward pass peak memory: 469.04934399999996 (MB) -[2023-08-21 16:50:06,925][inference][INFO] - + Warming up the 
forward pass -[2023-08-21 16:50:06,959][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-21 16:50:12,004][inference][INFO] - + Forward pass latency: 3.81e-03 (s) -[2023-08-21 16:50:12,005][inference][INFO] - + Forward pass throughput: 262.00 (samples/s) -[2023-08-21 16:50:12,006][inference][INFO] - + Warming up the generation pass -[2023-08-21 16:50:12,585][inference][INFO] - + Tracking generation latency and throughput -[2023-08-21 16:50:18,018][inference][INFO] - + Generation pass latency: 5.43e-01 (s) -[2023-08-21 16:50:18,019][inference][INFO] - + Generation pass throughput: 184.00 (tokens/s) -[2023-08-21 16:50:18,020][inference][INFO] - Saving inference results -[2023-08-21 16:50:18,031][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-21_15:35:00_450a181d8b963b4e896be4aac701815aa554a6bb/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-21_15:35:00_450a181d8b963b4e896be4aac701815aa554a6bb/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_15:35:00_450a181d8b963b4e896be4aac701815aa554a6bb/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_15:35:00_450a181d8b963b4e896be4aac701815aa554a6bb/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-21_15:35:00_450a181d8b963b4e896be4aac701815aa554a6bb/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 78ee60cc6c0a491f615389adcd34f4a15ce4b58d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_15:35:00_450a181d8b963b4e896be4aac701815aa554a6bb/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-21_15:35:00_450a181d8b963b4e896be4aac701815aa554a6bb/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-21_15:35:00_450a181d8b963b4e896be4aac701815aa554a6bb/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-21_15:35:00_450a181d8b963b4e896be4aac701815aa554a6bb/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_15:35:00_450a181d8b963b4e896be4aac701815aa554a6bb/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-21_15:35:00_450a181d8b963b4e896be4aac701815aa554a6bb/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-21_15:35:00_450a181d8b963b4e896be4aac701815aa554a6bb/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_15:35:00_450a181d8b963b4e896be4aac701815aa554a6bb/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_15:35:00_450a181d8b963b4e896be4aac701815aa554a6bb/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-21_15:35:00_450a181d8b963b4e896be4aac701815aa554a6bb/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index f553022d304197067dff0d6a9d7db088dedce631..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_15:35:00_450a181d8b963b4e896be4aac701815aa554a6bb/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.31264,0.00317,315.0 diff --git a/raw_results/2023-08-21_15:35:00_450a181d8b963b4e896be4aac701815aa554a6bb/pytorch_bert_inference/0/main.log b/raw_results/2023-08-21_15:35:00_450a181d8b963b4e896be4aac701815aa554a6bb/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
eb4b3c1c66994451a0ae12ba5285094582cf8f39..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_15:35:00_450a181d8b963b4e896be4aac701815aa554a6bb/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-21 16:51:22,476][benchmark][INFO] - Configuring inference benchmark -[2023-08-21 16:51:22,477][benchmark][INFO] - + Setting seed(42) -[2023-08-21 16:51:23,715][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-21 16:51:23,716][backend][INFO] - Configuring pytorch backend -[2023-08-21 16:51:23,716][backend][INFO] - + Checking initial device isolation -[2023-08-21 16:51:23,716][backend][INFO] - + Checking contineous device isolation -[2023-08-21 16:51:23,716][pytorch][INFO] - + Disabling gradients -[2023-08-21 16:51:23,716][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-21 16:51:24,328][pytorch][INFO] - + Turning on eval mode -[2023-08-21 16:51:24,329][inference][INFO] - Running inference benchmark -[2023-08-21 16:51:24,445][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-21 16:51:24,446][inference][INFO] - + Tracking forward pass peak memory -[2023-08-21 16:51:24,502][inference][INFO] - + Forward pass peak memory: 467.31264 (MB) -[2023-08-21 16:51:24,504][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-21 16:51:24,505][inference][INFO] - + Warming up the forward pass -[2023-08-21 16:51:24,537][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-21 16:51:29,587][inference][INFO] - + Forward pass latency: 3.17e-03 (s) -[2023-08-21 16:51:29,589][inference][INFO] - + Forward pass throughput: 315.00 (samples/s) -[2023-08-21 16:51:29,589][inference][INFO] - Saving inference results -[2023-08-21 16:51:29,601][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-21_15:35:00_450a181d8b963b4e896be4aac701815aa554a6bb/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-21_15:35:00_450a181d8b963b4e896be4aac701815aa554a6bb/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_15:35:00_450a181d8b963b4e896be4aac701815aa554a6bb/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_15:35:00_450a181d8b963b4e896be4aac701815aa554a6bb/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-21_15:35:00_450a181d8b963b4e896be4aac701815aa554a6bb/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 83d87875bfd4b31e35b552dcbc5ff39268cbc880..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_15:35:00_450a181d8b963b4e896be4aac701815aa554a6bb/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
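A consistency note on the inference_results.csv rows in this diff: the reported throughput columns agree with throughput derived from the measured latency, i.e. batch_size / forward.latency for the forward pass and batch_size * new_tokens / generate.latency for generation (new_tokens is 100 in every config here). A quick check against rows shown above, assuming that derivation:

# bert, batch_size=1: forward.latency(s)=0.00317, reported throughput 315.0
print(round(1 / 0.00317))       # 315  -> matches exactly

# gpt2, batch_size=1, new_tokens=100: generate.latency(s)=0.543, reported 184.0
print(round(1 * 100 / 0.543))   # 184  -> matches exactly

# bert, batch_size=4: forward.latency(s)=0.00349, reported throughput 1150.0
print(round(4 / 0.00349))       # 1146 -> off only by rounding: the stored
                                # latency is itself rounded (3.49e-03 in main.log)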
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-21_15:35:00_450a181d8b963b4e896be4aac701815aa554a6bb/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-21_15:35:00_450a181d8b963b4e896be4aac701815aa554a6bb/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-21_15:35:00_450a181d8b963b4e896be4aac701815aa554a6bb/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_15:35:00_450a181d8b963b4e896be4aac701815aa554a6bb/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-21_15:35:00_450a181d8b963b4e896be4aac701815aa554a6bb/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-21_15:35:00_450a181d8b963b4e896be4aac701815aa554a6bb/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_15:35:00_450a181d8b963b4e896be4aac701815aa554a6bb/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_15:35:00_450a181d8b963b4e896be4aac701815aa554a6bb/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-21_15:35:00_450a181d8b963b4e896be4aac701815aa554a6bb/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 143bee1bb344de4878424676a391292b4199ae64..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_15:35:00_450a181d8b963b4e896be4aac701815aa554a6bb/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.410368,0.00349,1150.0 diff --git a/raw_results/2023-08-21_15:35:00_450a181d8b963b4e896be4aac701815aa554a6bb/pytorch_bert_inference/1/main.log b/raw_results/2023-08-21_15:35:00_450a181d8b963b4e896be4aac701815aa554a6bb/pytorch_bert_inference/1/main.log deleted file mode 100644 index d5a1289d5710e928db626f363f1c766e36cd6bf0..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_15:35:00_450a181d8b963b4e896be4aac701815aa554a6bb/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-21 16:51:29,971][benchmark][INFO] - Configuring inference benchmark -[2023-08-21 16:51:29,972][benchmark][INFO] - + Setting seed(42) -[2023-08-21 16:51:30,414][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-21 16:51:30,414][backend][INFO] - Configuring pytorch backend -[2023-08-21 16:51:30,415][backend][INFO] - + Checking initial device isolation -[2023-08-21 16:51:30,415][backend][INFO] - + Checking contineous device isolation -[2023-08-21 16:51:30,415][pytorch][INFO] - + Disabling gradients -[2023-08-21 16:51:30,415][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-21 16:51:30,529][pytorch][INFO] - + Turning on eval mode -[2023-08-21 16:51:30,530][inference][INFO] - Running inference benchmark -[2023-08-21 16:51:30,647][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-21 16:51:30,648][inference][INFO] - + Tracking forward pass 
peak memory -[2023-08-21 16:51:30,690][inference][INFO] - + Forward pass peak memory: 468.410368 (MB) -[2023-08-21 16:51:30,691][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-21 16:51:30,693][inference][INFO] - + Warming up the forward pass -[2023-08-21 16:51:30,729][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-21 16:51:35,775][inference][INFO] - + Forward pass latency: 3.49e-03 (s) -[2023-08-21 16:51:35,776][inference][INFO] - + Forward pass throughput: 1150.00 (samples/s) -[2023-08-21 16:51:35,776][inference][INFO] - Saving inference results -[2023-08-21 16:51:35,785][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-21_15:35:00_450a181d8b963b4e896be4aac701815aa554a6bb/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-21_15:35:00_450a181d8b963b4e896be4aac701815aa554a6bb/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_15:35:00_450a181d8b963b4e896be4aac701815aa554a6bb/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_15:35:00_450a181d8b963b4e896be4aac701815aa554a6bb/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-21_15:35:00_450a181d8b963b4e896be4aac701815aa554a6bb/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 9aa3ec4f3cc801e02a535e5c8d2933ed772d6de0..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_15:35:00_450a181d8b963b4e896be4aac701815aa554a6bb/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-21_15:35:00_450a181d8b963b4e896be4aac701815aa554a6bb/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-21_15:35:00_450a181d8b963b4e896be4aac701815aa554a6bb/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-21_15:35:00_450a181d8b963b4e896be4aac701815aa554a6bb/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_15:35:00_450a181d8b963b4e896be4aac701815aa554a6bb/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-21_15:35:00_450a181d8b963b4e896be4aac701815aa554a6bb/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-21_15:35:00_450a181d8b963b4e896be4aac701815aa554a6bb/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_15:35:00_450a181d8b963b4e896be4aac701815aa554a6bb/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-21_15:35:00_450a181d8b963b4e896be4aac701815aa554a6bb/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-21_15:35:00_450a181d8b963b4e896be4aac701815aa554a6bb/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index fe92cdf191868d83928bef8d7620fef3e2f0d815..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-21_15:35:00_450a181d8b963b4e896be4aac701815aa554a6bb/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.26643199999995,0.0038,263.0,0.502,199.0 diff --git a/raw_results/2023-08-21_15:35:00_450a181d8b963b4e896be4aac701815aa554a6bb/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-21_15:35:00_450a181d8b963b4e896be4aac701815aa554a6bb/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 68718657ea5a1eb24138bd47f65bfef2b5309444..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-21_15:35:00_450a181d8b963b4e896be4aac701815aa554a6bb/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-21 16:51:40,523][benchmark][INFO] - Configuring inference benchmark -[2023-08-21 16:51:40,525][benchmark][INFO] - + Setting seed(42) -[2023-08-21 16:51:41,934][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-21 16:51:41,934][backend][INFO] - Configuring pytorch backend -[2023-08-21 16:51:41,934][backend][INFO] - + Checking initial device isolation -[2023-08-21 16:51:41,935][backend][INFO] - + Checking contineous device isolation -[2023-08-21 16:51:41,935][pytorch][INFO] - + Disabling gradients -[2023-08-21 16:51:41,935][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-21 16:51:42,557][pytorch][INFO] - + Turning on eval mode -[2023-08-21 16:51:42,557][inference][INFO] - Running inference benchmark -[2023-08-21 16:51:42,748][inference][INFO] - + Tracking forward pass peak memory -[2023-08-21 16:51:42,795][inference][INFO] - + Forward pass peak memory: 469.26643199999995 (MB) -[2023-08-21 16:51:42,796][inference][INFO] - + Warming up the forward pass -[2023-08-21 16:51:42,828][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-21 16:51:47,873][inference][INFO] - + Forward pass latency: 3.80e-03 (s) -[2023-08-21 16:51:47,875][inference][INFO] - + Forward pass throughput: 263.00 (samples/s) -[2023-08-21 16:51:47,875][inference][INFO] - + Warming up the generation pass -[2023-08-21 16:51:48,372][inference][INFO] - + Tracking generation latency and throughput -[2023-08-21 16:51:53,394][inference][INFO] - + Generation pass latency: 5.02e-01 (s) -[2023-08-21 16:51:53,395][inference][INFO] - + Generation pass throughput: 199.00 (tokens/s) -[2023-08-21 16:51:53,395][inference][INFO] - Saving inference results -[2023-08-21 16:51:53,408][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-22_05:55:35_58c36bea74ef8f5a4464d04ab2191d0b1bec6de7/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-22_05:55:35_58c36bea74ef8f5a4464d04ab2191d0b1bec6de7/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_05:55:35_58c36bea74ef8f5a4464d04ab2191d0b1bec6de7/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_05:55:35_58c36bea74ef8f5a4464d04ab2191d0b1bec6de7/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-22_05:55:35_58c36bea74ef8f5a4464d04ab2191d0b1bec6de7/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index e8b56d6be9c66eb036c6566d5ada806b8ad348e2..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_05:55:35_58c36bea74ef8f5a4464d04ab2191d0b1bec6de7/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
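Because every job directory in this diff follows the same raw_results/<date>_<sha>/<experiment>/<job>/inference_results.csv layout, the per-commit series can be rebuilt by globbing the tree. A sketch of that aggregation, assuming pandas is available and the script runs from the repository root:

import glob
import pandas as pd

frames = []
for path in glob.glob("raw_results/*/*/*/inference_results.csv"):
    commit, experiment, job = path.split("/")[1:4]
    df = pd.read_csv(path, index_col=0)  # first CSV column is an unnamed index
    df["commit"], df["experiment"], df["job"] = commit, experiment, job
    frames.append(df)

results = pd.concat(frames, ignore_index=True)
print(results[["commit", "experiment", "job",
               "forward.latency(s)", "forward.throughput(samples/s)"]])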
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-22_05:55:35_58c36bea74ef8f5a4464d04ab2191d0b1bec6de7/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-22_05:55:35_58c36bea74ef8f5a4464d04ab2191d0b1bec6de7/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-22_05:55:35_58c36bea74ef8f5a4464d04ab2191d0b1bec6de7/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_05:55:35_58c36bea74ef8f5a4464d04ab2191d0b1bec6de7/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-22_05:55:35_58c36bea74ef8f5a4464d04ab2191d0b1bec6de7/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-22_05:55:35_58c36bea74ef8f5a4464d04ab2191d0b1bec6de7/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_05:55:35_58c36bea74ef8f5a4464d04ab2191d0b1bec6de7/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_05:55:35_58c36bea74ef8f5a4464d04ab2191d0b1bec6de7/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-22_05:55:35_58c36bea74ef8f5a4464d04ab2191d0b1bec6de7/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index a1acb7493028e960a07eb422892c41a3fa1bdd7e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_05:55:35_58c36bea74ef8f5a4464d04ab2191d0b1bec6de7/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.06278399999997,0.00398,251.0 diff --git a/raw_results/2023-08-22_05:55:35_58c36bea74ef8f5a4464d04ab2191d0b1bec6de7/pytorch_bert_inference/0/main.log b/raw_results/2023-08-22_05:55:35_58c36bea74ef8f5a4464d04ab2191d0b1bec6de7/pytorch_bert_inference/0/main.log deleted file mode 100644 index f6e788f4b02919030421b0fe56c01f57bf4df891..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_05:55:35_58c36bea74ef8f5a4464d04ab2191d0b1bec6de7/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-22 06:49:42,589][benchmark][INFO] - Configuring inference benchmark -[2023-08-22 06:49:42,590][benchmark][INFO] - + Setting seed(42) -[2023-08-22 06:49:43,977][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-22 06:49:43,977][backend][INFO] - Configuring pytorch backend -[2023-08-22 06:49:43,977][backend][INFO] - + Checking initial device isolation -[2023-08-22 06:49:43,978][backend][INFO] - + Checking contineous device isolation -[2023-08-22 06:49:43,978][pytorch][INFO] - + Disabling gradients -[2023-08-22 06:49:43,978][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-22 06:49:44,921][pytorch][INFO] - + Turning on eval mode -[2023-08-22 06:49:44,922][inference][INFO] - Running inference benchmark -[2023-08-22 06:49:45,039][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-22 06:49:45,041][inference][INFO] - + Tracking forward 
pass peak memory -[2023-08-22 06:49:45,104][inference][INFO] - + Forward pass peak memory: 467.06278399999997 (MB) -[2023-08-22 06:49:45,106][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-22 06:49:45,107][inference][INFO] - + Warming up the forward pass -[2023-08-22 06:49:45,141][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-22 06:49:50,186][inference][INFO] - + Forward pass latency: 3.98e-03 (s) -[2023-08-22 06:49:50,187][inference][INFO] - + Forward pass throughput: 251.00 (samples/s) -[2023-08-22 06:49:50,188][inference][INFO] - Saving inference results -[2023-08-22 06:49:50,198][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-22_05:55:35_58c36bea74ef8f5a4464d04ab2191d0b1bec6de7/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-22_05:55:35_58c36bea74ef8f5a4464d04ab2191d0b1bec6de7/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_05:55:35_58c36bea74ef8f5a4464d04ab2191d0b1bec6de7/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_05:55:35_58c36bea74ef8f5a4464d04ab2191d0b1bec6de7/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-22_05:55:35_58c36bea74ef8f5a4464d04ab2191d0b1bec6de7/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 8ddf95689df70e6f2df106e7b6b3b2a99dac64e2..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_05:55:35_58c36bea74ef8f5a4464d04ab2191d0b1bec6de7/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} 
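The sweeper block that follows is what produces the numbered job directories seen throughout this diff: the basic sweeper splits benchmark.input_shapes.batch_size: 1,4 into one job per value, so job 0 runs with batch_size=1 and job 1 with batch_size=4, each recording its own overrides.yaml. A toy sketch of that expansion for a single swept key (the real BasicSweeper additionally takes the cartesian product across multiple keys):

params = {"benchmark.input_shapes.batch_size": "1,4"}
jobs = []
for key, values in params.items():
    for num, value in enumerate(values.split(",")):
        jobs.append({"num": num, "override": f"{key}={value}"})
print(jobs)
# [{'num': 0, 'override': 'benchmark.input_shapes.batch_size=1'},
#  {'num': 1, 'override': 'benchmark.input_shapes.batch_size=4'}]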
- launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-22_05:55:35_58c36bea74ef8f5a4464d04ab2191d0b1bec6de7/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
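An aside on the sweep mechanics recorded in the hydra.yaml hunk above: the basic sweeper's `params` entry `benchmark.input_shapes.batch_size: 1,4` combined with `subdir: ${hydra.job.num}` is what produces the numbered job directories (`0`, `1`) seen throughout this dump, each with its own `.config/overrides.yaml`. A minimal sketch of that expansion, assuming only the standard cross-product behaviour of Hydra's BasicSweeper (an illustration, not Hydra's actual implementation):

```python
# Sketch: how a sweep spec like "benchmark.input_shapes.batch_size: 1,4"
# expands into one override list per job, matching the per-job
# .config/overrides.yaml files in this dump. Illustration only.

def expand_sweep(params: dict[str, str]) -> list[list[str]]:
    """Cross-product of comma-separated sweep values, one override list per job."""
    jobs: list[list[str]] = [[]]
    for key, values in params.items():
        jobs = [job + [f"{key}={v}"] for job in jobs for v in values.split(",")]
    return jobs

overrides_per_job = expand_sweep({"benchmark.input_shapes.batch_size": "1,4"})
# -> [['benchmark.input_shapes.batch_size=1'], ['benchmark.input_shapes.batch_size=4']]
# Job 0 writes .../pytorch_bert_inference/0, job 1 writes .../1, as seen above.
```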
a/raw_results/2023-08-22_05:55:35_58c36bea74ef8f5a4464d04ab2191d0b1bec6de7/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-22_05:55:35_58c36bea74ef8f5a4464d04ab2191d0b1bec6de7/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_05:55:35_58c36bea74ef8f5a4464d04ab2191d0b1bec6de7/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-22_05:55:35_58c36bea74ef8f5a4464d04ab2191d0b1bec6de7/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-22_05:55:35_58c36bea74ef8f5a4464d04ab2191d0b1bec6de7/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_05:55:35_58c36bea74ef8f5a4464d04ab2191d0b1bec6de7/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_05:55:35_58c36bea74ef8f5a4464d04ab2191d0b1bec6de7/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-22_05:55:35_58c36bea74ef8f5a4464d04ab2191d0b1bec6de7/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 1e1ac3bda33d48eba58e9e3f9728378ed8f47ba0..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_05:55:35_58c36bea74ef8f5a4464d04ab2191d0b1bec6de7/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.066304,0.00435,920.0 diff --git a/raw_results/2023-08-22_05:55:35_58c36bea74ef8f5a4464d04ab2191d0b1bec6de7/pytorch_bert_inference/1/main.log b/raw_results/2023-08-22_05:55:35_58c36bea74ef8f5a4464d04ab2191d0b1bec6de7/pytorch_bert_inference/1/main.log deleted file mode 100644 index 
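The inference_results.csv rows deleted above can be sanity-checked against the config in the same job directory: for this bert batch-4 job, the recorded 920 samples/s is consistent with batch_size divided by the mean forward latency. A small check under that assumption (the CSV header and row are copied verbatim from the hunk above; batch_size comes from this job's config.yaml):

```python
import csv
import io

# CSV content copied verbatim from the deleted inference_results.csv above.
data = (",forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s)\n"
        "0,468.066304,0.00435,920.0\n")
row = next(csv.DictReader(io.StringIO(data)))

batch_size = 4  # benchmark.input_shapes.batch_size in this job's config.yaml
latency = float(row["forward.latency(s)"])
print(batch_size / latency)  # ~919.5, i.e. the recorded 920.0 samples/s after rounding
```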
6a318baf644ab4f36f9e63ad643e6b8a0b101401..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_05:55:35_58c36bea74ef8f5a4464d04ab2191d0b1bec6de7/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-22 06:49:50,661][benchmark][INFO] - Configuring inference benchmark -[2023-08-22 06:49:50,662][benchmark][INFO] - + Setting seed(42) -[2023-08-22 06:49:51,238][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-22 06:49:51,238][backend][INFO] - Configuring pytorch backend -[2023-08-22 06:49:51,238][backend][INFO] - + Checking initial device isolation -[2023-08-22 06:49:51,238][backend][INFO] - + Checking contineous device isolation -[2023-08-22 06:49:51,239][pytorch][INFO] - + Disabling gradients -[2023-08-22 06:49:51,239][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-22 06:49:51,356][pytorch][INFO] - + Turning on eval mode -[2023-08-22 06:49:51,357][inference][INFO] - Running inference benchmark -[2023-08-22 06:49:51,484][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-22 06:49:51,485][inference][INFO] - + Tracking forward pass peak memory -[2023-08-22 06:49:51,534][inference][INFO] - + Forward pass peak memory: 468.066304 (MB) -[2023-08-22 06:49:51,536][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-22 06:49:51,537][inference][INFO] - + Warming up the forward pass -[2023-08-22 06:49:51,582][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-22 06:49:56,626][inference][INFO] - + Forward pass latency: 4.35e-03 (s) -[2023-08-22 06:49:56,627][inference][INFO] - + Forward pass throughput: 920.00 (samples/s) -[2023-08-22 06:49:56,627][inference][INFO] - Saving inference results -[2023-08-22 06:49:56,635][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-22_05:55:35_58c36bea74ef8f5a4464d04ab2191d0b1bec6de7/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-22_05:55:35_58c36bea74ef8f5a4464d04ab2191d0b1bec6de7/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_05:55:35_58c36bea74ef8f5a4464d04ab2191d0b1bec6de7/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_05:55:35_58c36bea74ef8f5a4464d04ab2191d0b1bec6de7/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-22_05:55:35_58c36bea74ef8f5a4464d04ab2191d0b1bec6de7/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 833904603ed86411586861cb5bb6995c8ac08e40..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_05:55:35_58c36bea74ef8f5a4464d04ab2191d0b1bec6de7/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-22_05:55:35_58c36bea74ef8f5a4464d04ab2191d0b1bec6de7/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-22_05:55:35_58c36bea74ef8f5a4464d04ab2191d0b1bec6de7/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-22_05:55:35_58c36bea74ef8f5a4464d04ab2191d0b1bec6de7/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_05:55:35_58c36bea74ef8f5a4464d04ab2191d0b1bec6de7/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-22_05:55:35_58c36bea74ef8f5a4464d04ab2191d0b1bec6de7/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-22_05:55:35_58c36bea74ef8f5a4464d04ab2191d0b1bec6de7/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_05:55:35_58c36bea74ef8f5a4464d04ab2191d0b1bec6de7/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_05:55:35_58c36bea74ef8f5a4464d04ab2191d0b1bec6de7/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-22_05:55:35_58c36bea74ef8f5a4464d04ab2191d0b1bec6de7/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index edea291ac29cfd2e39fe9ac7f54daea75230cb7f..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_05:55:35_58c36bea74ef8f5a4464d04ab2191d0b1bec6de7/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.09439999999995,0.00342,292.0,0.48,208.0 diff --git a/raw_results/2023-08-22_05:55:35_58c36bea74ef8f5a4464d04ab2191d0b1bec6de7/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-22_05:55:35_58c36bea74ef8f5a4464d04ab2191d0b1bec6de7/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index a5642c775141978f68660efb37ca7e10731a133e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_05:55:35_58c36bea74ef8f5a4464d04ab2191d0b1bec6de7/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-22 06:50:01,859][benchmark][INFO] - Configuring inference benchmark -[2023-08-22 06:50:01,860][benchmark][INFO] - + Setting seed(42) -[2023-08-22 06:50:03,248][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-22 06:50:03,248][backend][INFO] - Configuring pytorch backend -[2023-08-22 06:50:03,248][backend][INFO] - + Checking initial device isolation -[2023-08-22 06:50:03,248][backend][INFO] - + Checking contineous device isolation -[2023-08-22 06:50:03,249][pytorch][INFO] - + Disabling gradients -[2023-08-22 06:50:03,249][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-22 06:50:03,922][pytorch][INFO] - + Turning on eval mode -[2023-08-22 06:50:03,923][inference][INFO] - Running inference benchmark -[2023-08-22 06:50:04,117][inference][INFO] - + Tracking forward pass peak memory -[2023-08-22 06:50:04,166][inference][INFO] - + Forward pass peak memory: 469.09439999999995 (MB) -[2023-08-22 06:50:04,168][inference][INFO] - + Warming up the 
forward pass -[2023-08-22 06:50:04,202][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-22 06:50:09,248][inference][INFO] - + Forward pass latency: 3.42e-03 (s) -[2023-08-22 06:50:09,249][inference][INFO] - + Forward pass throughput: 292.00 (samples/s) -[2023-08-22 06:50:09,250][inference][INFO] - + Warming up the generation pass -[2023-08-22 06:50:09,743][inference][INFO] - + Tracking generation latency and throughput -[2023-08-22 06:50:15,026][inference][INFO] - + Generation pass latency: 4.80e-01 (s) -[2023-08-22 06:50:15,027][inference][INFO] - + Generation pass throughput: 208.00 (tokens/s) -[2023-08-22 06:50:15,027][inference][INFO] - Saving inference results -[2023-08-22 06:50:15,040][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-22_06:12:57_182b83749a7058547e1e882c603cbf97e20259f8/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-22_06:12:57_182b83749a7058547e1e882c603cbf97e20259f8/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_06:12:57_182b83749a7058547e1e882c603cbf97e20259f8/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_06:12:57_182b83749a7058547e1e882c603cbf97e20259f8/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-22_06:12:57_182b83749a7058547e1e882c603cbf97e20259f8/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index dd72fed88ea1751d96a4464f3ce153b0b1c44281..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_06:12:57_182b83749a7058547e1e882c603cbf97e20259f8/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-22_06:12:57_182b83749a7058547e1e882c603cbf97e20259f8/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
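The run/sweep directory patterns in these hydra.yaml files explain the timestamped paths used throughout raw_results/: `${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}` resolves from environment variables set by the launching harness. A sketch of that resolution using plain OmegaConf (assumption: nothing beyond OmegaConf's built-in `oc.env` resolver is involved; the example values are the commit date and SHA visible in this dump's paths):

```python
import os
from omegaconf import OmegaConf

# Environment as the harness would set it; values taken from the paths above.
os.environ["COMMIT_DATE_GMT"] = "2023-08-22_06:12:57"
os.environ["COMMIT_SHA"] = "182b83749a7058547e1e882c603cbf97e20259f8"

cfg = OmegaConf.create({
    "experiment_name": "pytorch_bert_inference",
    "sweep_dir": "sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}",
})
print(cfg.sweep_dir)
# sweeps/2023-08-22_06:12:57_182b83749a7058547e1e882c603cbf97e20259f8/pytorch_bert_inference
```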
a/raw_results/2023-08-22_06:12:57_182b83749a7058547e1e882c603cbf97e20259f8/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-22_06:12:57_182b83749a7058547e1e882c603cbf97e20259f8/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_06:12:57_182b83749a7058547e1e882c603cbf97e20259f8/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-22_06:12:57_182b83749a7058547e1e882c603cbf97e20259f8/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-22_06:12:57_182b83749a7058547e1e882c603cbf97e20259f8/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_06:12:57_182b83749a7058547e1e882c603cbf97e20259f8/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_06:12:57_182b83749a7058547e1e882c603cbf97e20259f8/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-22_06:12:57_182b83749a7058547e1e882c603cbf97e20259f8/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 0e8bad804b82a1f96a029c55e25766d2c5996ec8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_06:12:57_182b83749a7058547e1e882c603cbf97e20259f8/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.030016,0.00317,315.0 diff --git a/raw_results/2023-08-22_06:12:57_182b83749a7058547e1e882c603cbf97e20259f8/pytorch_bert_inference/0/main.log b/raw_results/2023-08-22_06:12:57_182b83749a7058547e1e882c603cbf97e20259f8/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
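Because every job in this dump follows the same `raw_results/<commit-date>_<sha>/<experiment>/<job>/inference_results.csv` layout, regression triage across commits reduces to collecting these per-job CSVs. A purely illustrative aggregation sketch over that layout (standard library only; column names as in the deleted files):

```python
import csv
from pathlib import Path

# Walk the raw_results layout visible in the deleted paths above and gather
# one (commit, job, latency, throughput) tuple per results file.
rows = []
for path in Path("raw_results").glob("*/pytorch_bert_inference/*/inference_results.csv"):
    commit, job = path.parts[1], path.parts[3]
    with path.open() as f:
        for rec in csv.DictReader(f):
            rows.append((commit, job,
                         float(rec["forward.latency(s)"]),
                         float(rec["forward.throughput(samples/s)"])))

for commit, job, lat, tput in sorted(rows):
    print(f"{commit} job {job}: latency={lat:.5f}s throughput={tput:.0f} samples/s")
```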
66af046fae794bf6a5c44d0b4a1a945e74968975..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_06:12:57_182b83749a7058547e1e882c603cbf97e20259f8/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-22 06:51:18,758][benchmark][INFO] - Configuring inference benchmark -[2023-08-22 06:51:18,759][benchmark][INFO] - + Setting seed(42) -[2023-08-22 06:51:19,955][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-22 06:51:19,955][backend][INFO] - Configuring pytorch backend -[2023-08-22 06:51:19,955][backend][INFO] - + Checking initial device isolation -[2023-08-22 06:51:19,955][backend][INFO] - + Checking contineous device isolation -[2023-08-22 06:51:19,956][pytorch][INFO] - + Disabling gradients -[2023-08-22 06:51:19,956][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-22 06:51:20,569][pytorch][INFO] - + Turning on eval mode -[2023-08-22 06:51:20,569][inference][INFO] - Running inference benchmark -[2023-08-22 06:51:20,701][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-22 06:51:20,702][inference][INFO] - + Tracking forward pass peak memory -[2023-08-22 06:51:20,757][inference][INFO] - + Forward pass peak memory: 467.030016 (MB) -[2023-08-22 06:51:20,758][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-22 06:51:20,759][inference][INFO] - + Warming up the forward pass -[2023-08-22 06:51:20,792][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-22 06:51:25,841][inference][INFO] - + Forward pass latency: 3.17e-03 (s) -[2023-08-22 06:51:25,842][inference][INFO] - + Forward pass throughput: 315.00 (samples/s) -[2023-08-22 06:51:25,842][inference][INFO] - Saving inference results -[2023-08-22 06:51:25,852][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-22_06:12:57_182b83749a7058547e1e882c603cbf97e20259f8/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-22_06:12:57_182b83749a7058547e1e882c603cbf97e20259f8/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_06:12:57_182b83749a7058547e1e882c603cbf97e20259f8/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_06:12:57_182b83749a7058547e1e882c603cbf97e20259f8/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-22_06:12:57_182b83749a7058547e1e882c603cbf97e20259f8/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 65640050b03ae258072a296b6492f6b659d8ce0e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_06:12:57_182b83749a7058547e1e882c603cbf97e20259f8/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-22_06:12:57_182b83749a7058547e1e882c603cbf97e20259f8/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-22_06:12:57_182b83749a7058547e1e882c603cbf97e20259f8/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-22_06:12:57_182b83749a7058547e1e882c603cbf97e20259f8/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_06:12:57_182b83749a7058547e1e882c603cbf97e20259f8/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-22_06:12:57_182b83749a7058547e1e882c603cbf97e20259f8/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-22_06:12:57_182b83749a7058547e1e882c603cbf97e20259f8/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_06:12:57_182b83749a7058547e1e882c603cbf97e20259f8/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_06:12:57_182b83749a7058547e1e882c603cbf97e20259f8/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-22_06:12:57_182b83749a7058547e1e882c603cbf97e20259f8/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 3a21e45f52cadd37e8ac869ae6bb1b1d0422dc76..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_06:12:57_182b83749a7058547e1e882c603cbf97e20259f8/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.13183999999995,0.00352,1140.0 diff --git a/raw_results/2023-08-22_06:12:57_182b83749a7058547e1e882c603cbf97e20259f8/pytorch_bert_inference/1/main.log b/raw_results/2023-08-22_06:12:57_182b83749a7058547e1e882c603cbf97e20259f8/pytorch_bert_inference/1/main.log deleted file mode 100644 index 6861d43afb481efe00962f719aa24a5fe6b43ca2..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_06:12:57_182b83749a7058547e1e882c603cbf97e20259f8/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-22 06:51:26,212][benchmark][INFO] - Configuring inference benchmark -[2023-08-22 06:51:26,213][benchmark][INFO] - + Setting seed(42) -[2023-08-22 06:51:26,647][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-22 06:51:26,648][backend][INFO] - Configuring pytorch backend -[2023-08-22 06:51:26,648][backend][INFO] - + Checking initial device isolation -[2023-08-22 06:51:26,648][backend][INFO] - + Checking contineous device isolation -[2023-08-22 06:51:26,648][pytorch][INFO] - + Disabling gradients -[2023-08-22 06:51:26,648][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-22 06:51:26,760][pytorch][INFO] - + Turning on eval mode -[2023-08-22 06:51:26,761][inference][INFO] - Running inference benchmark -[2023-08-22 06:51:26,950][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-22 06:51:26,951][inference][INFO] - + Tracking forward 
pass peak memory -[2023-08-22 06:51:26,995][inference][INFO] - + Forward pass peak memory: 468.13183999999995 (MB) -[2023-08-22 06:51:26,996][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-22 06:51:26,997][inference][INFO] - + Warming up the forward pass -[2023-08-22 06:51:27,049][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-22 06:51:32,095][inference][INFO] - + Forward pass latency: 3.52e-03 (s) -[2023-08-22 06:51:32,096][inference][INFO] - + Forward pass throughput: 1140.00 (samples/s) -[2023-08-22 06:51:32,096][inference][INFO] - Saving inference results -[2023-08-22 06:51:32,104][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-22_06:12:57_182b83749a7058547e1e882c603cbf97e20259f8/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-22_06:12:57_182b83749a7058547e1e882c603cbf97e20259f8/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_06:12:57_182b83749a7058547e1e882c603cbf97e20259f8/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_06:12:57_182b83749a7058547e1e882c603cbf97e20259f8/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-22_06:12:57_182b83749a7058547e1e882c603cbf97e20259f8/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index f4cc7b5c4be784e3b422f0b709862fcd0b1d63fd..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_06:12:57_182b83749a7058547e1e882c603cbf97e20259f8/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - 
launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-22_06:12:57_182b83749a7058547e1e882c603cbf97e20259f8/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-22_06:12:57_182b83749a7058547e1e882c603cbf97e20259f8/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-22_06:12:57_182b83749a7058547e1e882c603cbf97e20259f8/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_06:12:57_182b83749a7058547e1e882c603cbf97e20259f8/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-22_06:12:57_182b83749a7058547e1e882c603cbf97e20259f8/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-22_06:12:57_182b83749a7058547e1e882c603cbf97e20259f8/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_06:12:57_182b83749a7058547e1e882c603cbf97e20259f8/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_06:12:57_182b83749a7058547e1e882c603cbf97e20259f8/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-22_06:12:57_182b83749a7058547e1e882c603cbf97e20259f8/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 51e40de5685d26747bad18561a545b66177f85ff..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_06:12:57_182b83749a7058547e1e882c603cbf97e20259f8/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.25824,0.00375,267.0,0.498,201.0 diff --git a/raw_results/2023-08-22_06:12:57_182b83749a7058547e1e882c603cbf97e20259f8/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-22_06:12:57_182b83749a7058547e1e882c603cbf97e20259f8/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index e091e7461621ed1bc5be1232b44afa4738e94a5a..0000000000000000000000000000000000000000 --- 
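The generation columns in the gpt2 rows are likewise internally consistent: for the row deleted above, 201 tokens/s matches new_tokens × batch_size / generate.latency after rounding. A quick check under that assumption (new_tokens=100 and batch_size=1 are taken from this job's config.yaml; the formula is inferred from the recorded numbers, not from the benchmark's source):

```python
# Values from the deleted CSV row and this job's config.yaml above.
new_tokens, batch_size = 100, 1      # from config.yaml (assumed relevant inputs)
generate_latency_s = 0.498           # generate.latency(s) in the CSV row
forward_latency_s = 0.00375          # forward.latency(s) in the CSV row

print(round(new_tokens * batch_size / generate_latency_s))  # 201 tokens/s, as recorded
print(round(batch_size / forward_latency_s))                # 267 samples/s, as recorded
```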
a/raw_results/2023-08-22_06:12:57_182b83749a7058547e1e882c603cbf97e20259f8/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-22 06:51:36,839][benchmark][INFO] - Configuring inference benchmark -[2023-08-22 06:51:36,840][benchmark][INFO] - + Setting seed(42) -[2023-08-22 06:51:38,296][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-22 06:51:38,297][backend][INFO] - Configuring pytorch backend -[2023-08-22 06:51:38,297][backend][INFO] - + Checking initial device isolation -[2023-08-22 06:51:38,297][backend][INFO] - + Checking contineous device isolation -[2023-08-22 06:51:38,297][pytorch][INFO] - + Disabling gradients -[2023-08-22 06:51:38,298][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-22 06:51:39,107][pytorch][INFO] - + Turning on eval mode -[2023-08-22 06:51:39,108][inference][INFO] - Running inference benchmark -[2023-08-22 06:51:39,320][inference][INFO] - + Tracking forward pass peak memory -[2023-08-22 06:51:39,368][inference][INFO] - + Forward pass peak memory: 469.25824 (MB) -[2023-08-22 06:51:39,370][inference][INFO] - + Warming up the forward pass -[2023-08-22 06:51:39,402][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-22 06:51:44,445][inference][INFO] - + Forward pass latency: 3.75e-03 (s) -[2023-08-22 06:51:44,447][inference][INFO] - + Forward pass throughput: 267.00 (samples/s) -[2023-08-22 06:51:44,448][inference][INFO] - + Warming up the generation pass -[2023-08-22 06:51:45,039][inference][INFO] - + Tracking generation latency and throughput -[2023-08-22 06:51:50,516][inference][INFO] - + Generation pass latency: 4.98e-01 (s) -[2023-08-22 06:51:50,517][inference][INFO] - + Generation pass throughput: 201.00 (tokens/s) -[2023-08-22 06:51:50,518][inference][INFO] - Saving inference results -[2023-08-22 06:51:50,529][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-22_06:22:45_6a314ea7cd01a78a58403bc83e7c637ef83e6b26/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-22_06:22:45_6a314ea7cd01a78a58403bc83e7c637ef83e6b26/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_06:22:45_6a314ea7cd01a78a58403bc83e7c637ef83e6b26/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 
16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_06:22:45_6a314ea7cd01a78a58403bc83e7c637ef83e6b26/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-22_06:22:45_6a314ea7cd01a78a58403bc83e7c637ef83e6b26/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index da0aeba64290c13b1014fc4fbc46f43ffcbb6cfd..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_06:22:45_6a314ea7cd01a78a58403bc83e7c637ef83e6b26/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-22_06:22:45_6a314ea7cd01a78a58403bc83e7c637ef83e6b26/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-22_06:22:45_6a314ea7cd01a78a58403bc83e7c637ef83e6b26/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-22_06:22:45_6a314ea7cd01a78a58403bc83e7c637ef83e6b26/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_06:22:45_6a314ea7cd01a78a58403bc83e7c637ef83e6b26/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-22_06:22:45_6a314ea7cd01a78a58403bc83e7c637ef83e6b26/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-22_06:22:45_6a314ea7cd01a78a58403bc83e7c637ef83e6b26/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_06:22:45_6a314ea7cd01a78a58403bc83e7c637ef83e6b26/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_06:22:45_6a314ea7cd01a78a58403bc83e7c637ef83e6b26/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-22_06:22:45_6a314ea7cd01a78a58403bc83e7c637ef83e6b26/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 49c8b8e64488079958a48889de91d540e0e0a53c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_06:22:45_6a314ea7cd01a78a58403bc83e7c637ef83e6b26/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.972672,0.00333,300.0 diff --git a/raw_results/2023-08-22_06:22:45_6a314ea7cd01a78a58403bc83e7c637ef83e6b26/pytorch_bert_inference/0/main.log b/raw_results/2023-08-22_06:22:45_6a314ea7cd01a78a58403bc83e7c637ef83e6b26/pytorch_bert_inference/0/main.log deleted file mode 100644 index b315a565fb1143915e7b9b08849f8ff2665ec7de..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_06:22:45_6a314ea7cd01a78a58403bc83e7c637ef83e6b26/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-22 06:52:53,654][benchmark][INFO] - Configuring inference benchmark -[2023-08-22 06:52:53,655][benchmark][INFO] - + Setting seed(42) -[2023-08-22 06:52:54,862][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-22 06:52:54,863][backend][INFO] - Configuring pytorch backend -[2023-08-22 06:52:54,863][backend][INFO] - + Checking initial device isolation -[2023-08-22 06:52:54,863][backend][INFO] - + Checking contineous device isolation -[2023-08-22 06:52:54,863][pytorch][INFO] - + Disabling gradients -[2023-08-22 06:52:54,864][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-22 06:52:55,489][pytorch][INFO] - + Turning on eval mode -[2023-08-22 06:52:55,490][inference][INFO] - Running inference benchmark -[2023-08-22 06:52:55,608][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-22 06:52:55,609][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-22 06:52:55,668][inference][INFO] - + Forward pass peak memory: 466.972672 (MB) -[2023-08-22 06:52:55,669][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-22 06:52:55,671][inference][INFO] - + Warming up the forward pass -[2023-08-22 06:52:55,707][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-22 06:53:00,757][inference][INFO] - + Forward pass latency: 3.33e-03 (s) -[2023-08-22 06:53:00,758][inference][INFO] - + Forward pass throughput: 300.00 (samples/s) -[2023-08-22 06:53:00,758][inference][INFO] - Saving inference results -[2023-08-22 06:53:00,769][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-22_06:22:45_6a314ea7cd01a78a58403bc83e7c637ef83e6b26/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-22_06:22:45_6a314ea7cd01a78a58403bc83e7c637ef83e6b26/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_06:22:45_6a314ea7cd01a78a58403bc83e7c637ef83e6b26/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_06:22:45_6a314ea7cd01a78a58403bc83e7c637ef83e6b26/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-22_06:22:45_6a314ea7cd01a78a58403bc83e7c637ef83e6b26/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index e1e4579b0a6eb6b2105cf03173b4edfaeb6988f8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_06:22:45_6a314ea7cd01a78a58403bc83e7c637ef83e6b26/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-22_06:22:45_6a314ea7cd01a78a58403bc83e7c637ef83e6b26/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-22_06:22:45_6a314ea7cd01a78a58403bc83e7c637ef83e6b26/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-08-22_06:22:45_6a314ea7cd01a78a58403bc83e7c637ef83e6b26/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_06:22:45_6a314ea7cd01a78a58403bc83e7c637ef83e6b26/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-22_06:22:45_6a314ea7cd01a78a58403bc83e7c637ef83e6b26/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-22_06:22:45_6a314ea7cd01a78a58403bc83e7c637ef83e6b26/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_06:22:45_6a314ea7cd01a78a58403bc83e7c637ef83e6b26/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_06:22:45_6a314ea7cd01a78a58403bc83e7c637ef83e6b26/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-22_06:22:45_6a314ea7cd01a78a58403bc83e7c637ef83e6b26/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 12389596dd150ffd2f03a2ba5a1ca98c7be315ce..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_06:22:45_6a314ea7cd01a78a58403bc83e7c637ef83e6b26/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.045824,0.00343,1170.0 diff --git a/raw_results/2023-08-22_06:22:45_6a314ea7cd01a78a58403bc83e7c637ef83e6b26/pytorch_bert_inference/1/main.log b/raw_results/2023-08-22_06:22:45_6a314ea7cd01a78a58403bc83e7c637ef83e6b26/pytorch_bert_inference/1/main.log deleted file mode 100644 index 9602b53f9a0746a4f7862eeafe2597c63b3418ed..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-22_06:22:45_6a314ea7cd01a78a58403bc83e7c637ef83e6b26/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-22 06:53:01,147][benchmark][INFO] - Configuring inference benchmark -[2023-08-22 06:53:01,148][benchmark][INFO] - + Setting seed(42) -[2023-08-22 06:53:01,599][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-22 06:53:01,599][backend][INFO] - Configuring pytorch backend -[2023-08-22 06:53:01,599][backend][INFO] - + Checking initial device isolation -[2023-08-22 06:53:01,600][backend][INFO] - + Checking contineous device isolation -[2023-08-22 06:53:01,600][pytorch][INFO] - + Disabling gradients -[2023-08-22 06:53:01,600][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-22 06:53:01,718][pytorch][INFO] - + Turning on eval mode -[2023-08-22 06:53:01,719][inference][INFO] - Running inference benchmark -[2023-08-22 06:53:01,843][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-22 06:53:01,845][inference][INFO] - + Tracking forward pass peak memory -[2023-08-22 06:53:01,891][inference][INFO] - + Forward pass peak memory: 468.045824 (MB) -[2023-08-22 06:53:01,892][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-22 06:53:01,894][inference][INFO] - + Warming up the forward pass -[2023-08-22 06:53:01,943][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-22 06:53:06,991][inference][INFO] - + Forward pass latency: 3.43e-03 (s) -[2023-08-22 06:53:06,992][inference][INFO] - + Forward pass throughput: 1170.00 (samples/s) -[2023-08-22 06:53:06,992][inference][INFO] - Saving inference results -[2023-08-22 06:53:07,001][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-22_06:22:45_6a314ea7cd01a78a58403bc83e7c637ef83e6b26/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-22_06:22:45_6a314ea7cd01a78a58403bc83e7c637ef83e6b26/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_06:22:45_6a314ea7cd01a78a58403bc83e7c637ef83e6b26/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: 
hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_06:22:45_6a314ea7cd01a78a58403bc83e7c637ef83e6b26/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-22_06:22:45_6a314ea7cd01a78a58403bc83e7c637ef83e6b26/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 84d780626351cf5b79054e1878ae6829a39cfcf5..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_06:22:45_6a314ea7cd01a78a58403bc83e7c637ef83e6b26/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-22_06:22:45_6a314ea7cd01a78a58403bc83e7c637ef83e6b26/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-22_06:22:45_6a314ea7cd01a78a58403bc83e7c637ef83e6b26/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-22_06:22:45_6a314ea7cd01a78a58403bc83e7c637ef83e6b26/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_06:22:45_6a314ea7cd01a78a58403bc83e7c637ef83e6b26/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-22_06:22:45_6a314ea7cd01a78a58403bc83e7c637ef83e6b26/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-22_06:22:45_6a314ea7cd01a78a58403bc83e7c637ef83e6b26/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_06:22:45_6a314ea7cd01a78a58403bc83e7c637ef83e6b26/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_06:22:45_6a314ea7cd01a78a58403bc83e7c637ef83e6b26/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-22_06:22:45_6a314ea7cd01a78a58403bc83e7c637ef83e6b26/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 44b52ae4739e644c99efa0a6045858599d611ab0..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_06:22:45_6a314ea7cd01a78a58403bc83e7c637ef83e6b26/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.082112,0.0037,270.0,0.493,203.0 diff --git a/raw_results/2023-08-22_06:22:45_6a314ea7cd01a78a58403bc83e7c637ef83e6b26/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-22_06:22:45_6a314ea7cd01a78a58403bc83e7c637ef83e6b26/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index bdff1624a5c0bd250d671218c202307d0ad3d8db..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_06:22:45_6a314ea7cd01a78a58403bc83e7c637ef83e6b26/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-22 06:53:11,755][benchmark][INFO] - Configuring inference benchmark -[2023-08-22 06:53:11,756][benchmark][INFO] - + Setting seed(42) -[2023-08-22 06:53:13,304][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-22 06:53:13,305][backend][INFO] - Configuring pytorch backend -[2023-08-22 06:53:13,305][backend][INFO] - + Checking initial device isolation -[2023-08-22 06:53:13,305][backend][INFO] - + Checking contineous device isolation -[2023-08-22 06:53:13,305][pytorch][INFO] - + Disabling gradients -[2023-08-22 06:53:13,305][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-22 06:53:14,065][pytorch][INFO] - + Turning on eval mode -[2023-08-22 06:53:14,066][inference][INFO] - Running inference benchmark -[2023-08-22 06:53:14,289][inference][INFO] - + Tracking forward pass peak memory -[2023-08-22 06:53:14,341][inference][INFO] - + Forward pass peak memory: 469.082112 (MB) -[2023-08-22 06:53:14,343][inference][INFO] - + Warming up the forward pass 
-[2023-08-22 06:53:14,375][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-22 06:53:19,420][inference][INFO] - + Forward pass latency: 3.70e-03 (s) -[2023-08-22 06:53:19,422][inference][INFO] - + Forward pass throughput: 270.00 (samples/s) -[2023-08-22 06:53:19,422][inference][INFO] - + Warming up the generation pass -[2023-08-22 06:53:19,984][inference][INFO] - + Tracking generation latency and throughput -[2023-08-22 06:53:25,403][inference][INFO] - + Generation pass latency: 4.93e-01 (s) -[2023-08-22 06:53:25,404][inference][INFO] - + Generation pass throughput: 203.00 (tokens/s) -[2023-08-22 06:53:25,405][inference][INFO] - Saving inference results -[2023-08-22 06:53:25,416][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-22_07:54:44_88e51ba30673b42fa93b2e15760dd645d50753f0/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-22_07:54:44_88e51ba30673b42fa93b2e15760dd645d50753f0/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_07:54:44_88e51ba30673b42fa93b2e15760dd645d50753f0/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_07:54:44_88e51ba30673b42fa93b2e15760dd645d50753f0/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-22_07:54:44_88e51ba30673b42fa93b2e15760dd645d50753f0/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index cfa4a736111579c4ac81a61cadac48c560efda45..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_07:54:44_88e51ba30673b42fa93b2e15760dd645d50753f0/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-22_07:54:44_88e51ba30673b42fa93b2e15760dd645d50753f0/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-22_07:54:44_88e51ba30673b42fa93b2e15760dd645d50753f0/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-22_07:54:44_88e51ba30673b42fa93b2e15760dd645d50753f0/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_07:54:44_88e51ba30673b42fa93b2e15760dd645d50753f0/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-22_07:54:44_88e51ba30673b42fa93b2e15760dd645d50753f0/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-22_07:54:44_88e51ba30673b42fa93b2e15760dd645d50753f0/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_07:54:44_88e51ba30673b42fa93b2e15760dd645d50753f0/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_07:54:44_88e51ba30673b42fa93b2e15760dd645d50753f0/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-22_07:54:44_88e51ba30673b42fa93b2e15760dd645d50753f0/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 313a25fe1ac778f0348d46bd1c49cb9277b460de..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_07:54:44_88e51ba30673b42fa93b2e15760dd645d50753f0/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.976768,0.00377,265.0 diff --git a/raw_results/2023-08-22_07:54:44_88e51ba30673b42fa93b2e15760dd645d50753f0/pytorch_bert_inference/0/main.log b/raw_results/2023-08-22_07:54:44_88e51ba30673b42fa93b2e15760dd645d50753f0/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
3f1d1a0925296e91d62dced9e6e4516c6ae456d5..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_07:54:44_88e51ba30673b42fa93b2e15760dd645d50753f0/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-22 08:50:00,975][benchmark][INFO] - Configuring inference benchmark -[2023-08-22 08:50:00,976][benchmark][INFO] - + Setting seed(42) -[2023-08-22 08:50:02,203][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-22 08:50:02,203][backend][INFO] - Configuring pytorch backend -[2023-08-22 08:50:02,203][backend][INFO] - + Checking initial device isolation -[2023-08-22 08:50:02,204][backend][INFO] - + Checking contineous device isolation -[2023-08-22 08:50:02,204][pytorch][INFO] - + Disabling gradients -[2023-08-22 08:50:02,204][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-22 08:50:02,837][pytorch][INFO] - + Turning on eval mode -[2023-08-22 08:50:02,837][inference][INFO] - Running inference benchmark -[2023-08-22 08:50:02,978][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-22 08:50:02,980][inference][INFO] - + Tracking forward pass peak memory -[2023-08-22 08:50:03,041][inference][INFO] - + Forward pass peak memory: 466.976768 (MB) -[2023-08-22 08:50:03,043][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-22 08:50:03,044][inference][INFO] - + Warming up the forward pass -[2023-08-22 08:50:03,077][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-22 08:50:08,121][inference][INFO] - + Forward pass latency: 3.77e-03 (s) -[2023-08-22 08:50:08,123][inference][INFO] - + Forward pass throughput: 265.00 (samples/s) -[2023-08-22 08:50:08,123][inference][INFO] - Saving inference results -[2023-08-22 08:50:08,135][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-22_07:54:44_88e51ba30673b42fa93b2e15760dd645d50753f0/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-22_07:54:44_88e51ba30673b42fa93b2e15760dd645d50753f0/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_07:54:44_88e51ba30673b42fa93b2e15760dd645d50753f0/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_07:54:44_88e51ba30673b42fa93b2e15760dd645d50753f0/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-22_07:54:44_88e51ba30673b42fa93b2e15760dd645d50753f0/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 79fcdc5479bdd53b8a31ba6768f36136b343224b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_07:54:44_88e51ba30673b42fa93b2e15760dd645d50753f0/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-22_07:54:44_88e51ba30673b42fa93b2e15760dd645d50753f0/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-22_07:54:44_88e51ba30673b42fa93b2e15760dd645d50753f0/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-22_07:54:44_88e51ba30673b42fa93b2e15760dd645d50753f0/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_07:54:44_88e51ba30673b42fa93b2e15760dd645d50753f0/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-22_07:54:44_88e51ba30673b42fa93b2e15760dd645d50753f0/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-22_07:54:44_88e51ba30673b42fa93b2e15760dd645d50753f0/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_07:54:44_88e51ba30673b42fa93b2e15760dd645d50753f0/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_07:54:44_88e51ba30673b42fa93b2e15760dd645d50753f0/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-22_07:54:44_88e51ba30673b42fa93b2e15760dd645d50753f0/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 00ab1289c4912ce9eb9f06a00e89d1c3d7e5977a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_07:54:44_88e51ba30673b42fa93b2e15760dd645d50753f0/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.041728,0.00424,943.0 diff --git a/raw_results/2023-08-22_07:54:44_88e51ba30673b42fa93b2e15760dd645d50753f0/pytorch_bert_inference/1/main.log b/raw_results/2023-08-22_07:54:44_88e51ba30673b42fa93b2e15760dd645d50753f0/pytorch_bert_inference/1/main.log deleted file mode 100644 index 85107813f60f3647269e29d3c5238e5f95564015..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_07:54:44_88e51ba30673b42fa93b2e15760dd645d50753f0/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-22 08:50:08,608][benchmark][INFO] - Configuring inference benchmark -[2023-08-22 08:50:08,609][benchmark][INFO] - + Setting seed(42) -[2023-08-22 08:50:09,176][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-22 08:50:09,176][backend][INFO] - Configuring pytorch backend -[2023-08-22 08:50:09,176][backend][INFO] - + Checking initial device isolation -[2023-08-22 08:50:09,176][backend][INFO] - + Checking contineous device isolation -[2023-08-22 08:50:09,176][pytorch][INFO] - + Disabling gradients -[2023-08-22 08:50:09,177][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-22 08:50:09,297][pytorch][INFO] - + Turning on eval mode -[2023-08-22 08:50:09,298][inference][INFO] - Running inference benchmark -[2023-08-22 08:50:09,560][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-22 08:50:09,561][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-22 08:50:09,608][inference][INFO] - + Forward pass peak memory: 468.041728 (MB) -[2023-08-22 08:50:09,609][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-22 08:50:09,611][inference][INFO] - + Warming up the forward pass -[2023-08-22 08:50:09,655][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-22 08:50:14,695][inference][INFO] - + Forward pass latency: 4.24e-03 (s) -[2023-08-22 08:50:14,696][inference][INFO] - + Forward pass throughput: 943.00 (samples/s) -[2023-08-22 08:50:14,696][inference][INFO] - Saving inference results -[2023-08-22 08:50:14,704][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-22_07:54:44_88e51ba30673b42fa93b2e15760dd645d50753f0/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-22_07:54:44_88e51ba30673b42fa93b2e15760dd645d50753f0/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_07:54:44_88e51ba30673b42fa93b2e15760dd645d50753f0/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_07:54:44_88e51ba30673b42fa93b2e15760dd645d50753f0/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-22_07:54:44_88e51ba30673b42fa93b2e15760dd645d50753f0/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 4fe55b7a1a361a081bce63b31a8482f390cd5bed..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_07:54:44_88e51ba30673b42fa93b2e15760dd645d50753f0/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-22_07:54:44_88e51ba30673b42fa93b2e15760dd645d50753f0/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-22_07:54:44_88e51ba30673b42fa93b2e15760dd645d50753f0/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-22_07:54:44_88e51ba30673b42fa93b2e15760dd645d50753f0/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_07:54:44_88e51ba30673b42fa93b2e15760dd645d50753f0/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-22_07:54:44_88e51ba30673b42fa93b2e15760dd645d50753f0/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-22_07:54:44_88e51ba30673b42fa93b2e15760dd645d50753f0/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_07:54:44_88e51ba30673b42fa93b2e15760dd645d50753f0/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_07:54:44_88e51ba30673b42fa93b2e15760dd645d50753f0/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-22_07:54:44_88e51ba30673b42fa93b2e15760dd645d50753f0/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 315d0f094f0b3b3d888103706e98671a4026f671..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_07:54:44_88e51ba30673b42fa93b2e15760dd645d50753f0/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,468.774912,0.00384,260.0,0.54,185.0 diff --git a/raw_results/2023-08-22_07:54:44_88e51ba30673b42fa93b2e15760dd645d50753f0/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-22_07:54:44_88e51ba30673b42fa93b2e15760dd645d50753f0/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 84a5b2356f14a5f19e1858b49e25d1dd14a6e7f3..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-22_07:54:44_88e51ba30673b42fa93b2e15760dd645d50753f0/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-22 08:50:19,496][benchmark][INFO] - Configuring inference benchmark -[2023-08-22 08:50:19,497][benchmark][INFO] - + Setting seed(42) -[2023-08-22 08:50:21,241][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-22 08:50:21,242][backend][INFO] - Configuring pytorch backend -[2023-08-22 08:50:21,242][backend][INFO] - + Checking initial device isolation -[2023-08-22 08:50:21,242][backend][INFO] - + Checking continuous device isolation -[2023-08-22 08:50:21,242][pytorch][INFO] - + Disabling gradients -[2023-08-22 08:50:21,242][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-22 08:50:21,890][pytorch][INFO] - + Turning on eval mode -[2023-08-22 08:50:21,891][inference][INFO] - Running inference benchmark -[2023-08-22 08:50:22,088][inference][INFO] - + Tracking forward pass peak memory -[2023-08-22 08:50:22,139][inference][INFO] - + Forward pass peak memory: 468.774912 (MB) -[2023-08-22 08:50:22,140][inference][INFO] - + Warming up the forward pass -[2023-08-22 08:50:22,173][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-22 08:50:27,217][inference][INFO] - + Forward pass latency: 3.84e-03 (s) -[2023-08-22 08:50:27,218][inference][INFO] - + Forward pass throughput: 260.00 (samples/s) -[2023-08-22 08:50:27,219][inference][INFO] - + Warming up the generation pass -[2023-08-22 08:50:27,751][inference][INFO] - + Tracking generation latency and throughput -[2023-08-22 08:50:33,158][inference][INFO] - + Generation pass latency: 5.40e-01 (s) -[2023-08-22 08:50:33,159][inference][INFO] - + Generation pass throughput: 185.00 (tokens/s) -[2023-08-22 08:50:33,159][inference][INFO] - Saving inference results -[2023-08-22 08:50:33,171][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-22_08:13:56_edb28722c2e100a5d43e307bd4c59169c0cf86b8/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-22_08:13:56_edb28722c2e100a5d43e307bd4c59169c0cf86b8/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_08:13:56_edb28722c2e100a5d43e307bd4c59169c0cf86b8/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 -
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_08:13:56_edb28722c2e100a5d43e307bd4c59169c0cf86b8/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-22_08:13:56_edb28722c2e100a5d43e307bd4c59169c0cf86b8/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index fadfd6fc5325af0875802f07dc3d2242de6b5c32..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_08:13:56_edb28722c2e100a5d43e307bd4c59169c0cf86b8/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-22_08:13:56_edb28722c2e100a5d43e307bd4c59169c0cf86b8/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-22_08:13:56_edb28722c2e100a5d43e307bd4c59169c0cf86b8/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-22_08:13:56_edb28722c2e100a5d43e307bd4c59169c0cf86b8/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_08:13:56_edb28722c2e100a5d43e307bd4c59169c0cf86b8/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-22_08:13:56_edb28722c2e100a5d43e307bd4c59169c0cf86b8/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-22_08:13:56_edb28722c2e100a5d43e307bd4c59169c0cf86b8/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_08:13:56_edb28722c2e100a5d43e307bd4c59169c0cf86b8/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_08:13:56_edb28722c2e100a5d43e307bd4c59169c0cf86b8/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-22_08:13:56_edb28722c2e100a5d43e307bd4c59169c0cf86b8/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 07b192085c3e1f0d7eee7004380d1b546646d11b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_08:13:56_edb28722c2e100a5d43e307bd4c59169c0cf86b8/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.15347199999997,0.00306,327.0 diff --git a/raw_results/2023-08-22_08:13:56_edb28722c2e100a5d43e307bd4c59169c0cf86b8/pytorch_bert_inference/0/main.log b/raw_results/2023-08-22_08:13:56_edb28722c2e100a5d43e307bd4c59169c0cf86b8/pytorch_bert_inference/0/main.log deleted file mode 100644 index 01f46ec7db55097c8d34d64b59072369ab35eb65..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_08:13:56_edb28722c2e100a5d43e307bd4c59169c0cf86b8/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-22 08:51:41,301][benchmark][INFO] - Configuring inference benchmark -[2023-08-22 08:51:41,302][benchmark][INFO] - + Setting seed(42) -[2023-08-22 08:51:42,491][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-22 08:51:42,491][backend][INFO] - Configuring pytorch backend -[2023-08-22 08:51:42,492][backend][INFO] - + Checking initial device isolation -[2023-08-22 08:51:42,492][backend][INFO] - + Checking continuous device isolation -[2023-08-22 08:51:42,492][pytorch][INFO] - + Disabling gradients -[2023-08-22 08:51:42,492][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-22 08:51:43,101][pytorch][INFO] - + Turning on eval mode -[2023-08-22 08:51:43,101][inference][INFO] - Running inference benchmark -[2023-08-22 08:51:43,216][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-22 08:51:43,217][inference][INFO] - + Tracking forward
pass peak memory -[2023-08-22 08:51:43,279][inference][INFO] - + Forward pass peak memory: 466.15347199999997 (MB) -[2023-08-22 08:51:43,280][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-22 08:51:43,282][inference][INFO] - + Warming up the forward pass -[2023-08-22 08:51:43,318][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-22 08:51:48,372][inference][INFO] - + Forward pass latency: 3.06e-03 (s) -[2023-08-22 08:51:48,373][inference][INFO] - + Forward pass throughput: 327.00 (samples/s) -[2023-08-22 08:51:48,373][inference][INFO] - Saving inference results -[2023-08-22 08:51:48,384][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-22_08:13:56_edb28722c2e100a5d43e307bd4c59169c0cf86b8/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-22_08:13:56_edb28722c2e100a5d43e307bd4c59169c0cf86b8/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_08:13:56_edb28722c2e100a5d43e307bd4c59169c0cf86b8/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_08:13:56_edb28722c2e100a5d43e307bd4c59169c0cf86b8/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-22_08:13:56_edb28722c2e100a5d43e307bd4c59169c0cf86b8/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 7d82f9565dac3dd155ead4f972cf6e7942b59b84..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_08:13:56_edb28722c2e100a5d43e307bd4c59169c0cf86b8/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} 
- launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-22_08:13:56_edb28722c2e100a5d43e307bd4c59169c0cf86b8/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-22_08:13:56_edb28722c2e100a5d43e307bd4c59169c0cf86b8/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-22_08:13:56_edb28722c2e100a5d43e307bd4c59169c0cf86b8/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_08:13:56_edb28722c2e100a5d43e307bd4c59169c0cf86b8/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-22_08:13:56_edb28722c2e100a5d43e307bd4c59169c0cf86b8/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-22_08:13:56_edb28722c2e100a5d43e307bd4c59169c0cf86b8/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_08:13:56_edb28722c2e100a5d43e307bd4c59169c0cf86b8/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_08:13:56_edb28722c2e100a5d43e307bd4c59169c0cf86b8/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-22_08:13:56_edb28722c2e100a5d43e307bd4c59169c0cf86b8/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 103177a450fcbcf3ed4547f3b7771d3037027c73..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_08:13:56_edb28722c2e100a5d43e307bd4c59169c0cf86b8/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.27987199999995,0.0034,1180.0 diff --git a/raw_results/2023-08-22_08:13:56_edb28722c2e100a5d43e307bd4c59169c0cf86b8/pytorch_bert_inference/1/main.log b/raw_results/2023-08-22_08:13:56_edb28722c2e100a5d43e307bd4c59169c0cf86b8/pytorch_bert_inference/1/main.log deleted file mode 100644 index 
96c689f24ac43595bd280198ab5517ef2ac85b1f..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_08:13:56_edb28722c2e100a5d43e307bd4c59169c0cf86b8/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-22 08:51:48,750][benchmark][INFO] - Configuring inference benchmark -[2023-08-22 08:51:48,751][benchmark][INFO] - + Setting seed(42) -[2023-08-22 08:51:49,199][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-22 08:51:49,199][backend][INFO] - Configuring pytorch backend -[2023-08-22 08:51:49,200][backend][INFO] - + Checking initial device isolation -[2023-08-22 08:51:49,200][backend][INFO] - + Checking continuous device isolation -[2023-08-22 08:51:49,200][pytorch][INFO] - + Disabling gradients -[2023-08-22 08:51:49,200][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-22 08:51:49,314][pytorch][INFO] - + Turning on eval mode -[2023-08-22 08:51:49,314][inference][INFO] - Running inference benchmark -[2023-08-22 08:51:49,456][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-22 08:51:49,458][inference][INFO] - + Tracking forward pass peak memory -[2023-08-22 08:51:49,508][inference][INFO] - + Forward pass peak memory: 467.27987199999995 (MB) -[2023-08-22 08:51:49,509][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-22 08:51:49,510][inference][INFO] - + Warming up the forward pass -[2023-08-22 08:51:49,545][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-22 08:51:54,594][inference][INFO] - + Forward pass latency: 3.40e-03 (s) -[2023-08-22 08:51:54,595][inference][INFO] - + Forward pass throughput: 1180.00 (samples/s) -[2023-08-22 08:51:54,595][inference][INFO] - Saving inference results -[2023-08-22 08:51:54,605][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-22_08:13:56_edb28722c2e100a5d43e307bd4c59169c0cf86b8/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-22_08:13:56_edb28722c2e100a5d43e307bd4c59169c0cf86b8/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_08:13:56_edb28722c2e100a5d43e307bd4c59169c0cf86b8/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 -
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_08:13:56_edb28722c2e100a5d43e307bd4c59169c0cf86b8/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-22_08:13:56_edb28722c2e100a5d43e307bd4c59169c0cf86b8/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 666cdf1847b183e511e6c05f2a0ea3e1bd54e1ed..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_08:13:56_edb28722c2e100a5d43e307bd4c59169c0cf86b8/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-22_08:13:56_edb28722c2e100a5d43e307bd4c59169c0cf86b8/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-22_08:13:56_edb28722c2e100a5d43e307bd4c59169c0cf86b8/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-22_08:13:56_edb28722c2e100a5d43e307bd4c59169c0cf86b8/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_08:13:56_edb28722c2e100a5d43e307bd4c59169c0cf86b8/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-22_08:13:56_edb28722c2e100a5d43e307bd4c59169c0cf86b8/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-22_08:13:56_edb28722c2e100a5d43e307bd4c59169c0cf86b8/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_08:13:56_edb28722c2e100a5d43e307bd4c59169c0cf86b8/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_08:13:56_edb28722c2e100a5d43e307bd4c59169c0cf86b8/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-22_08:13:56_edb28722c2e100a5d43e307bd4c59169c0cf86b8/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index f41ce956d04dfe71016c8dbcddaa209b8aa93529..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_08:13:56_edb28722c2e100a5d43e307bd4c59169c0cf86b8/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.09849599999995,0.00334,299.0,0.493,203.0 diff --git a/raw_results/2023-08-22_08:13:56_edb28722c2e100a5d43e307bd4c59169c0cf86b8/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-22_08:13:56_edb28722c2e100a5d43e307bd4c59169c0cf86b8/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 0bf45c402646b19524ff29a3dec071f5cf79071b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_08:13:56_edb28722c2e100a5d43e307bd4c59169c0cf86b8/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-22 08:51:59,365][benchmark][INFO] - Configuring inference benchmark -[2023-08-22 08:51:59,366][benchmark][INFO] - + Setting seed(42) -[2023-08-22 08:52:00,742][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-22 08:52:00,742][backend][INFO] - Configuring pytorch backend -[2023-08-22 08:52:00,742][backend][INFO] - + Checking initial device isolation -[2023-08-22 08:52:00,743][backend][INFO] - + Checking continuous device isolation -[2023-08-22 08:52:00,743][pytorch][INFO] - + Disabling gradients -[2023-08-22 08:52:00,743][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-22 08:52:01,517][pytorch][INFO] - + Turning on eval mode -[2023-08-22 08:52:01,517][inference][INFO] - Running inference benchmark -[2023-08-22 08:52:01,711][inference][INFO] - + Tracking forward pass peak memory -[2023-08-22 08:52:01,762][inference][INFO] - + Forward pass peak memory: 469.09849599999995 (MB) -[2023-08-22 08:52:01,764][inference][INFO] - + Warming up the
forward pass -[2023-08-22 08:52:01,797][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-22 08:52:06,844][inference][INFO] - + Forward pass latency: 3.34e-03 (s) -[2023-08-22 08:52:06,846][inference][INFO] - + Forward pass throughput: 299.00 (samples/s) -[2023-08-22 08:52:06,846][inference][INFO] - + Warming up the generation pass -[2023-08-22 08:52:07,337][inference][INFO] - + Tracking generation latency and throughput -[2023-08-22 08:52:12,767][inference][INFO] - + Generation pass latency: 4.93e-01 (s) -[2023-08-22 08:52:12,768][inference][INFO] - + Generation pass throughput: 203.00 (tokens/s) -[2023-08-22 08:52:12,768][inference][INFO] - Saving inference results -[2023-08-22 08:52:12,779][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-22_08:39:10_36291906896904b47692c707471de9a4a963335d/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-22_08:39:10_36291906896904b47692c707471de9a4a963335d/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_08:39:10_36291906896904b47692c707471de9a4a963335d/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_08:39:10_36291906896904b47692c707471de9a4a963335d/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-22_08:39:10_36291906896904b47692c707471de9a4a963335d/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 8d2c162675faa502a11f4b4929c2e94bf893ec4a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_08:39:10_36291906896904b47692c707471de9a4a963335d/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-22_08:39:10_36291906896904b47692c707471de9a4a963335d/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-22_08:39:10_36291906896904b47692c707471de9a4a963335d/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-22_08:39:10_36291906896904b47692c707471de9a4a963335d/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_08:39:10_36291906896904b47692c707471de9a4a963335d/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-22_08:39:10_36291906896904b47692c707471de9a4a963335d/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-22_08:39:10_36291906896904b47692c707471de9a4a963335d/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_08:39:10_36291906896904b47692c707471de9a4a963335d/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_08:39:10_36291906896904b47692c707471de9a4a963335d/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-22_08:39:10_36291906896904b47692c707471de9a4a963335d/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 87a9deb7cf87bbfbf446edb72fadbc13f564c00e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_08:39:10_36291906896904b47692c707471de9a4a963335d/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.43609599999996,0.00316,316.0 diff --git a/raw_results/2023-08-22_08:39:10_36291906896904b47692c707471de9a4a963335d/pytorch_bert_inference/0/main.log b/raw_results/2023-08-22_08:39:10_36291906896904b47692c707471de9a4a963335d/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
cd50892c138ff97e2086eba728d9b6700a474b06..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_08:39:10_36291906896904b47692c707471de9a4a963335d/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-22 08:53:17,203][benchmark][INFO] - Configuring inference benchmark -[2023-08-22 08:53:17,203][benchmark][INFO] - + Setting seed(42) -[2023-08-22 08:53:18,389][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-22 08:53:18,389][backend][INFO] - Configuring pytorch backend -[2023-08-22 08:53:18,389][backend][INFO] - + Checking initial device isolation -[2023-08-22 08:53:18,389][backend][INFO] - + Checking continuous device isolation -[2023-08-22 08:53:18,389][pytorch][INFO] - + Disabling gradients -[2023-08-22 08:53:18,390][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-22 08:53:19,238][pytorch][INFO] - + Turning on eval mode -[2023-08-22 08:53:19,239][inference][INFO] - Running inference benchmark -[2023-08-22 08:53:19,355][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-22 08:53:19,356][inference][INFO] - + Tracking forward pass peak memory -[2023-08-22 08:53:19,416][inference][INFO] - + Forward pass peak memory: 466.43609599999996 (MB) -[2023-08-22 08:53:19,417][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-22 08:53:19,419][inference][INFO] - + Warming up the forward pass -[2023-08-22 08:53:19,452][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-22 08:53:24,503][inference][INFO] - + Forward pass latency: 3.16e-03 (s) -[2023-08-22 08:53:24,505][inference][INFO] - + Forward pass throughput: 316.00 (samples/s) -[2023-08-22 08:53:24,505][inference][INFO] - Saving inference results -[2023-08-22 08:53:24,515][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-22_08:39:10_36291906896904b47692c707471de9a4a963335d/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-22_08:39:10_36291906896904b47692c707471de9a4a963335d/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_08:39:10_36291906896904b47692c707471de9a4a963335d/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 -
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_08:39:10_36291906896904b47692c707471de9a4a963335d/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-22_08:39:10_36291906896904b47692c707471de9a4a963335d/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 07df8a788f7102489201590718df737dbb08478f..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_08:39:10_36291906896904b47692c707471de9a4a963335d/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-22_08:39:10_36291906896904b47692c707471de9a4a963335d/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-22_08:39:10_36291906896904b47692c707471de9a4a963335d/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-22_08:39:10_36291906896904b47692c707471de9a4a963335d/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_08:39:10_36291906896904b47692c707471de9a4a963335d/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-22_08:39:10_36291906896904b47692c707471de9a4a963335d/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-22_08:39:10_36291906896904b47692c707471de9a4a963335d/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_08:39:10_36291906896904b47692c707471de9a4a963335d/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_08:39:10_36291906896904b47692c707471de9a4a963335d/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-22_08:39:10_36291906896904b47692c707471de9a4a963335d/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 4ec3a93c9646bba95f406f206ee55eb4b8a735ff..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_08:39:10_36291906896904b47692c707471de9a4a963335d/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.53792,0.00349,1150.0 diff --git a/raw_results/2023-08-22_08:39:10_36291906896904b47692c707471de9a4a963335d/pytorch_bert_inference/1/main.log b/raw_results/2023-08-22_08:39:10_36291906896904b47692c707471de9a4a963335d/pytorch_bert_inference/1/main.log deleted file mode 100644 index e5c2072566676b1ceb7b7e208c5bc9df6a4ac199..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_08:39:10_36291906896904b47692c707471de9a4a963335d/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-22 08:53:24,893][benchmark][INFO] - Configuring inference benchmark -[2023-08-22 08:53:24,894][benchmark][INFO] - + Setting seed(42) -[2023-08-22 08:53:25,406][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-22 08:53:25,406][backend][INFO] - Configuring pytorch backend -[2023-08-22 08:53:25,406][backend][INFO] - + Checking initial device isolation -[2023-08-22 08:53:25,406][backend][INFO] - + Checking contineous device isolation -[2023-08-22 08:53:25,407][pytorch][INFO] - + Disabling gradients -[2023-08-22 08:53:25,407][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-22 08:53:25,521][pytorch][INFO] - + Turning on eval mode -[2023-08-22 08:53:25,522][inference][INFO] - Running inference benchmark -[2023-08-22 08:53:25,638][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-22 08:53:25,641][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-22 08:53:25,684][inference][INFO] - + Forward pass peak memory: 467.53792 (MB) -[2023-08-22 08:53:25,685][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-22 08:53:25,687][inference][INFO] - + Warming up the forward pass -[2023-08-22 08:53:25,723][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-22 08:53:30,769][inference][INFO] - + Forward pass latency: 3.49e-03 (s) -[2023-08-22 08:53:30,770][inference][INFO] - + Forward pass throughput: 1150.00 (samples/s) -[2023-08-22 08:53:30,770][inference][INFO] - Saving inference results -[2023-08-22 08:53:30,778][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-22_08:39:10_36291906896904b47692c707471de9a4a963335d/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-22_08:39:10_36291906896904b47692c707471de9a4a963335d/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_08:39:10_36291906896904b47692c707471de9a4a963335d/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_08:39:10_36291906896904b47692c707471de9a4a963335d/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-22_08:39:10_36291906896904b47692c707471de9a4a963335d/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 822b8d6e17cb2d07d395940a7c65ab41efe18ada..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_08:39:10_36291906896904b47692c707471de9a4a963335d/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-22_08:39:10_36291906896904b47692c707471de9a4a963335d/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-22_08:39:10_36291906896904b47692c707471de9a4a963335d/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-22_08:39:10_36291906896904b47692c707471de9a4a963335d/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_08:39:10_36291906896904b47692c707471de9a4a963335d/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-22_08:39:10_36291906896904b47692c707471de9a4a963335d/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-22_08:39:10_36291906896904b47692c707471de9a4a963335d/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_08:39:10_36291906896904b47692c707471de9a4a963335d/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_08:39:10_36291906896904b47692c707471de9a4a963335d/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-22_08:39:10_36291906896904b47692c707471de9a4a963335d/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index f1a519a4199742e6f694a595379aba2b2b45ee60..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_08:39:10_36291906896904b47692c707471de9a4a963335d/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,468.8896,0.00374,267.0,0.526,190.0 diff --git a/raw_results/2023-08-22_08:39:10_36291906896904b47692c707471de9a4a963335d/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-22_08:39:10_36291906896904b47692c707471de9a4a963335d/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index abb114a50bc9376220a7ac8544b43c1bd27a11ae..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-22_08:39:10_36291906896904b47692c707471de9a4a963335d/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-22 08:53:35,790][benchmark][INFO] - Configuring inference benchmark -[2023-08-22 08:53:35,790][benchmark][INFO] - + Setting seed(42) -[2023-08-22 08:53:37,195][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-22 08:53:37,195][backend][INFO] - Configuring pytorch backend -[2023-08-22 08:53:37,196][backend][INFO] - + Checking initial device isolation -[2023-08-22 08:53:37,196][backend][INFO] - + Checking contineous device isolation -[2023-08-22 08:53:37,196][pytorch][INFO] - + Disabling gradients -[2023-08-22 08:53:37,196][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-22 08:53:37,865][pytorch][INFO] - + Turning on eval mode -[2023-08-22 08:53:37,865][inference][INFO] - Running inference benchmark -[2023-08-22 08:53:38,058][inference][INFO] - + Tracking forward pass peak memory -[2023-08-22 08:53:38,110][inference][INFO] - + Forward pass peak memory: 468.8896 (MB) -[2023-08-22 08:53:38,111][inference][INFO] - + Warming up the forward pass -[2023-08-22 08:53:38,144][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-22 08:53:43,188][inference][INFO] - + Forward pass latency: 3.74e-03 (s) -[2023-08-22 08:53:43,190][inference][INFO] - + Forward pass throughput: 267.00 (samples/s) -[2023-08-22 08:53:43,191][inference][INFO] - + Warming up the generation pass -[2023-08-22 08:53:43,765][inference][INFO] - + Tracking generation latency and throughput -[2023-08-22 08:53:49,026][inference][INFO] - + Generation pass latency: 5.26e-01 (s) -[2023-08-22 08:53:49,026][inference][INFO] - + Generation pass throughput: 190.00 (tokens/s) -[2023-08-22 08:53:49,026][inference][INFO] - Saving inference results -[2023-08-22 08:53:49,038][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-22_12:13:38_62396cff46854dc53023236cfeb785993fa70067/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-22_12:13:38_62396cff46854dc53023236cfeb785993fa70067/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_12:13:38_62396cff46854dc53023236cfeb785993fa70067/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 
16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_12:13:38_62396cff46854dc53023236cfeb785993fa70067/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-22_12:13:38_62396cff46854dc53023236cfeb785993fa70067/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 9ab3a5a919bc5465b0abf9026b772f9339872ee7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_12:13:38_62396cff46854dc53023236cfeb785993fa70067/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-22_12:13:38_62396cff46854dc53023236cfeb785993fa70067/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-22_12:13:38_62396cff46854dc53023236cfeb785993fa70067/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-22_12:13:38_62396cff46854dc53023236cfeb785993fa70067/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_12:13:38_62396cff46854dc53023236cfeb785993fa70067/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-22_12:13:38_62396cff46854dc53023236cfeb785993fa70067/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-22_12:13:38_62396cff46854dc53023236cfeb785993fa70067/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_12:13:38_62396cff46854dc53023236cfeb785993fa70067/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_12:13:38_62396cff46854dc53023236cfeb785993fa70067/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-22_12:13:38_62396cff46854dc53023236cfeb785993fa70067/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 026bbf7aef45ee50d908c7521b33f0114797b819..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_12:13:38_62396cff46854dc53023236cfeb785993fa70067/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.403328,0.00317,315.0 diff --git a/raw_results/2023-08-22_12:13:38_62396cff46854dc53023236cfeb785993fa70067/pytorch_bert_inference/0/main.log b/raw_results/2023-08-22_12:13:38_62396cff46854dc53023236cfeb785993fa70067/pytorch_bert_inference/0/main.log deleted file mode 100644 index e249eb9ac6ad47ac092d6c41b624e8480eec3378..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_12:13:38_62396cff46854dc53023236cfeb785993fa70067/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-22 12:58:22,898][benchmark][INFO] - Configuring inference benchmark -[2023-08-22 12:58:22,898][benchmark][INFO] - + Setting seed(42) -[2023-08-22 12:58:24,104][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-22 12:58:24,105][backend][INFO] - Configuring pytorch backend -[2023-08-22 12:58:24,105][backend][INFO] - + Checking initial device isolation -[2023-08-22 12:58:24,105][backend][INFO] - + Checking contineous device isolation -[2023-08-22 12:58:24,105][pytorch][INFO] - + Disabling gradients -[2023-08-22 12:58:24,105][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-22 12:58:24,869][pytorch][INFO] - + Turning on eval mode -[2023-08-22 12:58:24,870][inference][INFO] - Running inference benchmark -[2023-08-22 12:58:24,990][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-22 12:58:24,991][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-22 12:58:25,057][inference][INFO] - + Forward pass peak memory: 466.403328 (MB) -[2023-08-22 12:58:25,059][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-22 12:58:25,061][inference][INFO] - + Warming up the forward pass -[2023-08-22 12:58:25,107][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-22 12:58:30,159][inference][INFO] - + Forward pass latency: 3.17e-03 (s) -[2023-08-22 12:58:30,160][inference][INFO] - + Forward pass throughput: 315.00 (samples/s) -[2023-08-22 12:58:30,160][inference][INFO] - Saving inference results -[2023-08-22 12:58:30,170][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-22_12:13:38_62396cff46854dc53023236cfeb785993fa70067/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-22_12:13:38_62396cff46854dc53023236cfeb785993fa70067/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_12:13:38_62396cff46854dc53023236cfeb785993fa70067/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_12:13:38_62396cff46854dc53023236cfeb785993fa70067/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-22_12:13:38_62396cff46854dc53023236cfeb785993fa70067/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index dd40145689a92b1bb6e0d0f30854f9b1cac813fa..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_12:13:38_62396cff46854dc53023236cfeb785993fa70067/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-22_12:13:38_62396cff46854dc53023236cfeb785993fa70067/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-22_12:13:38_62396cff46854dc53023236cfeb785993fa70067/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-08-22_12:13:38_62396cff46854dc53023236cfeb785993fa70067/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_12:13:38_62396cff46854dc53023236cfeb785993fa70067/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-22_12:13:38_62396cff46854dc53023236cfeb785993fa70067/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-22_12:13:38_62396cff46854dc53023236cfeb785993fa70067/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_12:13:38_62396cff46854dc53023236cfeb785993fa70067/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_12:13:38_62396cff46854dc53023236cfeb785993fa70067/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-22_12:13:38_62396cff46854dc53023236cfeb785993fa70067/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 014c1aee1487706cddc12cf3c5481fbe231c3aae..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_12:13:38_62396cff46854dc53023236cfeb785993fa70067/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.50924799999996,0.00344,1160.0 diff --git a/raw_results/2023-08-22_12:13:38_62396cff46854dc53023236cfeb785993fa70067/pytorch_bert_inference/1/main.log b/raw_results/2023-08-22_12:13:38_62396cff46854dc53023236cfeb785993fa70067/pytorch_bert_inference/1/main.log deleted file mode 100644 index ee1349f46a24abf74abe64334d9e4f22a77116d7..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-22_12:13:38_62396cff46854dc53023236cfeb785993fa70067/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-22 12:58:30,543][benchmark][INFO] - Configuring inference benchmark -[2023-08-22 12:58:30,544][benchmark][INFO] - + Setting seed(42) -[2023-08-22 12:58:30,986][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-22 12:58:30,987][backend][INFO] - Configuring pytorch backend -[2023-08-22 12:58:30,987][backend][INFO] - + Checking initial device isolation -[2023-08-22 12:58:30,987][backend][INFO] - + Checking contineous device isolation -[2023-08-22 12:58:30,987][pytorch][INFO] - + Disabling gradients -[2023-08-22 12:58:30,987][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-22 12:58:31,096][pytorch][INFO] - + Turning on eval mode -[2023-08-22 12:58:31,096][inference][INFO] - Running inference benchmark -[2023-08-22 12:58:31,215][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-22 12:58:31,216][inference][INFO] - + Tracking forward pass peak memory -[2023-08-22 12:58:31,265][inference][INFO] - + Forward pass peak memory: 467.50924799999996 (MB) -[2023-08-22 12:58:31,266][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-22 12:58:31,267][inference][INFO] - + Warming up the forward pass -[2023-08-22 12:58:31,303][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-22 12:58:36,349][inference][INFO] - + Forward pass latency: 3.44e-03 (s) -[2023-08-22 12:58:36,350][inference][INFO] - + Forward pass throughput: 1160.00 (samples/s) -[2023-08-22 12:58:36,350][inference][INFO] - Saving inference results -[2023-08-22 12:58:36,358][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-22_12:13:38_62396cff46854dc53023236cfeb785993fa70067/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-22_12:13:38_62396cff46854dc53023236cfeb785993fa70067/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_12:13:38_62396cff46854dc53023236cfeb785993fa70067/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference 
-model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_12:13:38_62396cff46854dc53023236cfeb785993fa70067/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-22_12:13:38_62396cff46854dc53023236cfeb785993fa70067/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index d5d66d242bfd4476a6323cb3ebcbb7520c3c1496..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_12:13:38_62396cff46854dc53023236cfeb785993fa70067/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-22_12:13:38_62396cff46854dc53023236cfeb785993fa70067/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-22_12:13:38_62396cff46854dc53023236cfeb785993fa70067/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-22_12:13:38_62396cff46854dc53023236cfeb785993fa70067/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_12:13:38_62396cff46854dc53023236cfeb785993fa70067/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-22_12:13:38_62396cff46854dc53023236cfeb785993fa70067/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-22_12:13:38_62396cff46854dc53023236cfeb785993fa70067/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_12:13:38_62396cff46854dc53023236cfeb785993fa70067/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_12:13:38_62396cff46854dc53023236cfeb785993fa70067/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-22_12:13:38_62396cff46854dc53023236cfeb785993fa70067/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 72464adc7cf2dcee7fad9ab2fc64ee853c85675c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_12:13:38_62396cff46854dc53023236cfeb785993fa70067/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.016576,0.00375,267.0,0.517,193.0 diff --git a/raw_results/2023-08-22_12:13:38_62396cff46854dc53023236cfeb785993fa70067/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-22_12:13:38_62396cff46854dc53023236cfeb785993fa70067/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 1b9ef1ba9c9b14cf13763b306b38b35a8baa51c9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_12:13:38_62396cff46854dc53023236cfeb785993fa70067/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-22 12:58:41,124][benchmark][INFO] - Configuring inference benchmark -[2023-08-22 12:58:41,125][benchmark][INFO] - + Setting seed(42) -[2023-08-22 12:58:42,499][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-22 12:58:42,499][backend][INFO] - Configuring pytorch backend -[2023-08-22 12:58:42,499][backend][INFO] - + Checking initial device isolation -[2023-08-22 12:58:42,499][backend][INFO] - + Checking contineous device isolation -[2023-08-22 12:58:42,500][pytorch][INFO] - + Disabling gradients -[2023-08-22 12:58:42,500][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-22 12:58:43,161][pytorch][INFO] - + Turning on eval mode -[2023-08-22 12:58:43,161][inference][INFO] - Running inference benchmark -[2023-08-22 12:58:43,354][inference][INFO] - + Tracking forward pass peak memory -[2023-08-22 12:58:43,401][inference][INFO] - + Forward pass peak memory: 469.016576 (MB) -[2023-08-22 12:58:43,402][inference][INFO] - + Warming up the forward pass 
-[2023-08-22 12:58:43,433][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-22 12:58:48,478][inference][INFO] - + Forward pass latency: 3.75e-03 (s) -[2023-08-22 12:58:48,480][inference][INFO] - + Forward pass throughput: 267.00 (samples/s) -[2023-08-22 12:58:48,481][inference][INFO] - + Warming up the generation pass -[2023-08-22 12:58:49,067][inference][INFO] - + Tracking generation latency and throughput -[2023-08-22 12:58:54,236][inference][INFO] - + Generation pass latency: 5.17e-01 (s) -[2023-08-22 12:58:54,237][inference][INFO] - + Generation pass throughput: 193.00 (tokens/s) -[2023-08-22 12:58:54,237][inference][INFO] - Saving inference results -[2023-08-22 12:58:54,249][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-22_12:50:12_e20fab0bbe8ca5b23738b670d1bd95aafbbbc53c/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-22_12:50:12_e20fab0bbe8ca5b23738b670d1bd95aafbbbc53c/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_12:50:12_e20fab0bbe8ca5b23738b670d1bd95aafbbbc53c/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_12:50:12_e20fab0bbe8ca5b23738b670d1bd95aafbbbc53c/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-22_12:50:12_e20fab0bbe8ca5b23738b670d1bd95aafbbbc53c/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 7ee5bcd82c6cc3628512a04612089ea749bb8fa3..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_12:50:12_e20fab0bbe8ca5b23738b670d1bd95aafbbbc53c/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-22_12:50:12_e20fab0bbe8ca5b23738b670d1bd95aafbbbc53c/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
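The `hydra.yaml` that just ended explains the directory layout of every `raw_results` path in this diff: a MULTIRUN sweep over `benchmark.input_shapes.batch_size: 1,4` writes one job per subdirectory `${hydra.job.num}` under a sweep dir keyed by commit date and SHA. A small sketch of that mapping; the command line is reconstructed from `hydra.job.name: main` and the sweeper params, not quoted from the repository.

```python
import os

# Presumed invocation (reconstructed, not verbatim):
#   python main.py --multirun --config-name bert_cpu_inference \
#       benchmark.input_shapes.batch_size=1,4
#
# hydra.sweep.dir is sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}
# and each job lands in subdir ${hydra.job.num}.
commit_date_gmt = os.environ.get("COMMIT_DATE_GMT", "2023-08-22_12:50:12")
commit_sha = os.environ.get("COMMIT_SHA", "e20fab0bbe8ca5b23738b670d1bd95aafbbbc53c")
experiment_name = "pytorch_bert_inference"

for job_num, batch_size in enumerate([1, 4]):
    out_dir = f"sweeps/{commit_date_gmt}_{commit_sha}/{experiment_name}/{job_num}"
    print(f"batch_size={batch_size} -> {out_dir}")
# batch_size=1 -> .../pytorch_bert_inference/0
# batch_size=4 -> .../pytorch_bert_inference/1
```

This matches the `output_dir` entries recorded in the deleted `hydra.yaml` files, e.g. `.../pytorch_bert_inference/0` for `batch_size=1`.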
a/raw_results/2023-08-22_12:50:12_e20fab0bbe8ca5b23738b670d1bd95aafbbbc53c/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-22_12:50:12_e20fab0bbe8ca5b23738b670d1bd95aafbbbc53c/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_12:50:12_e20fab0bbe8ca5b23738b670d1bd95aafbbbc53c/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-22_12:50:12_e20fab0bbe8ca5b23738b670d1bd95aafbbbc53c/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-22_12:50:12_e20fab0bbe8ca5b23738b670d1bd95aafbbbc53c/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_12:50:12_e20fab0bbe8ca5b23738b670d1bd95aafbbbc53c/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_12:50:12_e20fab0bbe8ca5b23738b670d1bd95aafbbbc53c/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-22_12:50:12_e20fab0bbe8ca5b23738b670d1bd95aafbbbc53c/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index a9050fd495c9b9b400cc3cd57f5ffc9b1e8aba70..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_12:50:12_e20fab0bbe8ca5b23738b670d1bd95aafbbbc53c/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.18156799999997,0.00377,265.0 diff --git a/raw_results/2023-08-22_12:50:12_e20fab0bbe8ca5b23738b670d1bd95aafbbbc53c/pytorch_bert_inference/0/main.log b/raw_results/2023-08-22_12:50:12_e20fab0bbe8ca5b23738b670d1bd95aafbbbc53c/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
8b7280a77a664c511ab6c806c067ff10cae14100..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_12:50:12_e20fab0bbe8ca5b23738b670d1bd95aafbbbc53c/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-22 12:59:59,864][benchmark][INFO] - Configuring inference benchmark -[2023-08-22 12:59:59,866][benchmark][INFO] - + Setting seed(42) -[2023-08-22 13:00:01,244][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-22 13:00:01,245][backend][INFO] - Configuring pytorch backend -[2023-08-22 13:00:01,245][backend][INFO] - + Checking initial device isolation -[2023-08-22 13:00:01,245][backend][INFO] - + Checking contineous device isolation -[2023-08-22 13:00:01,245][pytorch][INFO] - + Disabling gradients -[2023-08-22 13:00:01,246][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-22 13:00:01,981][pytorch][INFO] - + Turning on eval mode -[2023-08-22 13:00:01,982][inference][INFO] - Running inference benchmark -[2023-08-22 13:00:02,098][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-22 13:00:02,099][inference][INFO] - + Tracking forward pass peak memory -[2023-08-22 13:00:02,168][inference][INFO] - + Forward pass peak memory: 467.18156799999997 (MB) -[2023-08-22 13:00:02,169][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-22 13:00:02,171][inference][INFO] - + Warming up the forward pass -[2023-08-22 13:00:02,210][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-22 13:00:07,255][inference][INFO] - + Forward pass latency: 3.77e-03 (s) -[2023-08-22 13:00:07,256][inference][INFO] - + Forward pass throughput: 265.00 (samples/s) -[2023-08-22 13:00:07,256][inference][INFO] - Saving inference results -[2023-08-22 13:00:07,272][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-22_12:50:12_e20fab0bbe8ca5b23738b670d1bd95aafbbbc53c/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-22_12:50:12_e20fab0bbe8ca5b23738b670d1bd95aafbbbc53c/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_12:50:12_e20fab0bbe8ca5b23738b670d1bd95aafbbbc53c/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
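The `main.log` files in this diff carry the same metrics as the CSVs in a fixed textual form, e.g. `[2023-08-22 13:00:07,255][inference][INFO] - + Forward pass latency: 3.77e-03 (s)`. A short sketch for pulling those metrics back out of a log, built only from the line format visible above.

```python
import re

# Matches metric lines of the form
#   [timestamp][inference][INFO] - + <Metric name>: <value> (<unit>)
METRIC_RE = re.compile(
    r"\[(?P<ts>[\d\- :,]+)\]\[inference\]\[INFO\] - \+ "
    r"(?P<name>[\w ]+?): (?P<value>[\d.e+-]+) \((?P<unit>[^)]+)\)"
)

def parse_main_log(text: str) -> dict:
    metrics = {}
    for m in METRIC_RE.finditer(text):
        metrics[m.group("name")] = (float(m.group("value")), m.group("unit"))
    return metrics

log_line = ("[2023-08-22 13:00:07,255][inference][INFO] - "
            "+ Forward pass latency: 3.77e-03 (s)")
print(parse_main_log(log_line))
# {'Forward pass latency': (0.00377, 's')}
```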
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_12:50:12_e20fab0bbe8ca5b23738b670d1bd95aafbbbc53c/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-22_12:50:12_e20fab0bbe8ca5b23738b670d1bd95aafbbbc53c/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index f1c628efca83cde528942e57233892113b55b5d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_12:50:12_e20fab0bbe8ca5b23738b670d1bd95aafbbbc53c/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-22_12:50:12_e20fab0bbe8ca5b23738b670d1bd95aafbbbc53c/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-22_12:50:12_e20fab0bbe8ca5b23738b670d1bd95aafbbbc53c/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-22_12:50:12_e20fab0bbe8ca5b23738b670d1bd95aafbbbc53c/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_12:50:12_e20fab0bbe8ca5b23738b670d1bd95aafbbbc53c/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-22_12:50:12_e20fab0bbe8ca5b23738b670d1bd95aafbbbc53c/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-22_12:50:12_e20fab0bbe8ca5b23738b670d1bd95aafbbbc53c/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_12:50:12_e20fab0bbe8ca5b23738b670d1bd95aafbbbc53c/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_12:50:12_e20fab0bbe8ca5b23738b670d1bd95aafbbbc53c/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-22_12:50:12_e20fab0bbe8ca5b23738b670d1bd95aafbbbc53c/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 18084974bbff45997c67479526012ab70d7065f9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_12:50:12_e20fab0bbe8ca5b23738b670d1bd95aafbbbc53c/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.209664,0.0043,930.0 diff --git a/raw_results/2023-08-22_12:50:12_e20fab0bbe8ca5b23738b670d1bd95aafbbbc53c/pytorch_bert_inference/1/main.log b/raw_results/2023-08-22_12:50:12_e20fab0bbe8ca5b23738b670d1bd95aafbbbc53c/pytorch_bert_inference/1/main.log deleted file mode 100644 index 29f2098ac48ef938086d5001727360265012cfbb..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_12:50:12_e20fab0bbe8ca5b23738b670d1bd95aafbbbc53c/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-22 13:00:08,203][benchmark][INFO] - Configuring inference benchmark -[2023-08-22 13:00:08,205][benchmark][INFO] - + Setting seed(42) -[2023-08-22 13:00:08,628][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-22 13:00:08,628][backend][INFO] - Configuring pytorch backend -[2023-08-22 13:00:08,629][backend][INFO] - + Checking initial device isolation -[2023-08-22 13:00:08,629][backend][INFO] - + Checking contineous device isolation -[2023-08-22 13:00:08,629][pytorch][INFO] - + Disabling gradients -[2023-08-22 13:00:08,629][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-22 13:00:08,742][pytorch][INFO] - + Turning on eval mode -[2023-08-22 13:00:08,743][inference][INFO] - Running inference benchmark -[2023-08-22 13:00:08,867][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-22 13:00:08,868][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-22 13:00:08,910][inference][INFO] - + Forward pass peak memory: 468.209664 (MB) -[2023-08-22 13:00:08,911][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-22 13:00:08,913][inference][INFO] - + Warming up the forward pass -[2023-08-22 13:00:08,955][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-22 13:00:13,998][inference][INFO] - + Forward pass latency: 4.30e-03 (s) -[2023-08-22 13:00:13,999][inference][INFO] - + Forward pass throughput: 930.00 (samples/s) -[2023-08-22 13:00:13,999][inference][INFO] - Saving inference results -[2023-08-22 13:00:14,007][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-22_12:50:12_e20fab0bbe8ca5b23738b670d1bd95aafbbbc53c/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-22_12:50:12_e20fab0bbe8ca5b23738b670d1bd95aafbbbc53c/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_12:50:12_e20fab0bbe8ca5b23738b670d1bd95aafbbbc53c/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_12:50:12_e20fab0bbe8ca5b23738b670d1bd95aafbbbc53c/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-22_12:50:12_e20fab0bbe8ca5b23738b670d1bd95aafbbbc53c/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index f1671cf777d389f8df90fbae5bd0af6cbda8aecb..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_12:50:12_e20fab0bbe8ca5b23738b670d1bd95aafbbbc53c/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
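Across the two sweep jobs just shown, the reported forward throughput is consistent with `batch_size / forward.latency`, rounded to the nearest sample: batch size 1 at 3.77e-03 s gives 265 samples/s, and batch size 4 at 4.30e-03 s gives 930 samples/s. A quick check against the CSV rows above:

```python
# Sanity check: forward.throughput(samples/s) in these CSVs equals
# batch_size / forward.latency(s), rounded to the nearest integer.
rows = [
    # (batch_size, forward.latency(s), reported throughput)
    (1, 0.00377, 265.0),  # pytorch_bert_inference/0 above
    (4, 0.00430, 930.0),  # pytorch_bert_inference/1 above
]
for batch_size, latency, reported in rows:
    derived = round(batch_size / latency)
    assert derived == reported, (derived, reported)
    print(f"bs={batch_size}: {batch_size}/{latency} ~= {derived} samples/s")
```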
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-22_12:50:12_e20fab0bbe8ca5b23738b670d1bd95aafbbbc53c/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-22_12:50:12_e20fab0bbe8ca5b23738b670d1bd95aafbbbc53c/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-22_12:50:12_e20fab0bbe8ca5b23738b670d1bd95aafbbbc53c/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_12:50:12_e20fab0bbe8ca5b23738b670d1bd95aafbbbc53c/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-22_12:50:12_e20fab0bbe8ca5b23738b670d1bd95aafbbbc53c/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-22_12:50:12_e20fab0bbe8ca5b23738b670d1bd95aafbbbc53c/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_12:50:12_e20fab0bbe8ca5b23738b670d1bd95aafbbbc53c/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_12:50:12_e20fab0bbe8ca5b23738b670d1bd95aafbbbc53c/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-22_12:50:12_e20fab0bbe8ca5b23738b670d1bd95aafbbbc53c/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 629d024422b4aed4d87b355c87326fbe360cd2bd..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_12:50:12_e20fab0bbe8ca5b23738b670d1bd95aafbbbc53c/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.44255999999996,0.00397,252.0,0.529,189.0 diff --git a/raw_results/2023-08-22_12:50:12_e20fab0bbe8ca5b23738b670d1bd95aafbbbc53c/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-22_12:50:12_e20fab0bbe8ca5b23738b670d1bd95aafbbbc53c/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 6450911203e499c3b47dcb37c63128e65cb1046e..0000000000000000000000000000000000000000 --- 
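The generation columns in the gpt2 CSV above follow the same pattern, with the token count taken from `new_tokens: 100` in the benchmark configs: `generate.throughput(tokens/s)` is `new_tokens / generate.latency(s)` rounded to the nearest token, e.g. 100 / 0.529 s gives the reported 189 tokens/s.

```python
# Derive generate.throughput from generate.latency for the two gpt2 runs
# recorded above (new_tokens: 100 in the benchmark configs).
new_tokens = 100

for gen_latency, reported in [(0.529, 189.0), (0.517, 193.0)]:
    derived = round(new_tokens / gen_latency)
    assert derived == reported
    print(f"{new_tokens} tokens / {gen_latency} s ~= {derived} tokens/s")
```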
a/raw_results/2023-08-22_12:50:12_e20fab0bbe8ca5b23738b670d1bd95aafbbbc53c/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-22 13:00:19,019][benchmark][INFO] - Configuring inference benchmark -[2023-08-22 13:00:19,019][benchmark][INFO] - + Setting seed(42) -[2023-08-22 13:00:20,365][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-22 13:00:20,366][backend][INFO] - Configuring pytorch backend -[2023-08-22 13:00:20,366][backend][INFO] - + Checking initial device isolation -[2023-08-22 13:00:20,366][backend][INFO] - + Checking contineous device isolation -[2023-08-22 13:00:20,366][pytorch][INFO] - + Disabling gradients -[2023-08-22 13:00:20,367][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-22 13:00:21,126][pytorch][INFO] - + Turning on eval mode -[2023-08-22 13:00:21,127][inference][INFO] - Running inference benchmark -[2023-08-22 13:00:21,323][inference][INFO] - + Tracking forward pass peak memory -[2023-08-22 13:00:21,366][inference][INFO] - + Forward pass peak memory: 469.44255999999996 (MB) -[2023-08-22 13:00:21,368][inference][INFO] - + Warming up the forward pass -[2023-08-22 13:00:21,398][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-22 13:00:26,446][inference][INFO] - + Forward pass latency: 3.97e-03 (s) -[2023-08-22 13:00:26,448][inference][INFO] - + Forward pass throughput: 252.00 (samples/s) -[2023-08-22 13:00:26,449][inference][INFO] - + Warming up the generation pass -[2023-08-22 13:00:27,070][inference][INFO] - + Tracking generation latency and throughput -[2023-08-22 13:00:32,357][inference][INFO] - + Generation pass latency: 5.29e-01 (s) -[2023-08-22 13:00:32,358][inference][INFO] - + Generation pass throughput: 189.00 (tokens/s) -[2023-08-22 13:00:32,358][inference][INFO] - Saving inference results -[2023-08-22 13:00:32,368][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-22_14:10:30_fd56f7f0813d412c3e0848cbd6f94a23de2c07b7/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-22_14:10:30_fd56f7f0813d412c3e0848cbd6f94a23de2c07b7/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_14:10:30_fd56f7f0813d412c3e0848cbd6f94a23de2c07b7/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
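Because the same experiments recur under several `raw_results/<commit-date>_<commit-sha>/` directories in this diff (12:13:38_62396cff…, 12:50:12_e20fab…, 14:10:30_fd56f7…), regressions can be traced by folding all the per-job CSVs into one table. A sketch, assuming it is run from the repository root before these files were deleted:

```python
import glob
import pandas as pd

# Collect every inference_results.csv under raw_results/, keyed by the
# <commit-date>_<commit-sha> directory, the experiment name, and the sweep
# job number encoded in the path.
frames = []
for path in sorted(glob.glob("raw_results/*/*/*/inference_results.csv")):
    _, stamp_sha, experiment, job, _ = path.split("/")
    df = pd.read_csv(path, index_col=0)  # first CSV column is an unnamed index
    df["commit"] = stamp_sha
    df["experiment"] = experiment
    df["job"] = int(job)
    frames.append(df)

results = pd.concat(frames, ignore_index=True)  # raises if no files matched
# e.g. track the forward latency of one experiment across commits:
print(results[results.experiment == "pytorch_bert_inference"]
      [["commit", "job", "forward.latency(s)"]])
```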
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_14:10:30_fd56f7f0813d412c3e0848cbd6f94a23de2c07b7/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-22_14:10:30_fd56f7f0813d412c3e0848cbd6f94a23de2c07b7/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index d75c27f0ae308dc23e8d53b32de8cea4a483010a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_14:10:30_fd56f7f0813d412c3e0848cbd6f94a23de2c07b7/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-22_14:10:30_fd56f7f0813d412c3e0848cbd6f94a23de2c07b7/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-22_14:10:30_fd56f7f0813d412c3e0848cbd6f94a23de2c07b7/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-22_14:10:30_fd56f7f0813d412c3e0848cbd6f94a23de2c07b7/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_14:10:30_fd56f7f0813d412c3e0848cbd6f94a23de2c07b7/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-22_14:10:30_fd56f7f0813d412c3e0848cbd6f94a23de2c07b7/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-22_14:10:30_fd56f7f0813d412c3e0848cbd6f94a23de2c07b7/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_14:10:30_fd56f7f0813d412c3e0848cbd6f94a23de2c07b7/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_14:10:30_fd56f7f0813d412c3e0848cbd6f94a23de2c07b7/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-22_14:10:30_fd56f7f0813d412c3e0848cbd6f94a23de2c07b7/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index b7a50da81676ebe2b55a38a595ba6a9d9fe9e4ff..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_14:10:30_fd56f7f0813d412c3e0848cbd6f94a23de2c07b7/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,465.87904,0.00375,267.0 diff --git a/raw_results/2023-08-22_14:10:30_fd56f7f0813d412c3e0848cbd6f94a23de2c07b7/pytorch_bert_inference/0/main.log b/raw_results/2023-08-22_14:10:30_fd56f7f0813d412c3e0848cbd6f94a23de2c07b7/pytorch_bert_inference/0/main.log deleted file mode 100644 index 75ab1f1c9d672ec47dbe85f728ff833a6fdcfa40..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_14:10:30_fd56f7f0813d412c3e0848cbd6f94a23de2c07b7/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-22 14:50:05,633][benchmark][INFO] - Configuring inference benchmark -[2023-08-22 14:50:05,634][benchmark][INFO] - + Setting seed(42) -[2023-08-22 14:50:07,134][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-22 14:50:07,135][backend][INFO] - Configuring pytorch backend -[2023-08-22 14:50:07,135][backend][INFO] - + Checking initial device isolation -[2023-08-22 14:50:07,135][backend][INFO] - + Checking contineous device isolation -[2023-08-22 14:50:07,135][pytorch][INFO] - + Disabling gradients -[2023-08-22 14:50:07,136][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-22 14:50:07,753][pytorch][INFO] - + Turning on eval mode -[2023-08-22 14:50:07,753][inference][INFO] - Running inference benchmark -[2023-08-22 14:50:07,873][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-22 14:50:07,874][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-22 14:50:07,939][inference][INFO] - + Forward pass peak memory: 465.87904 (MB) -[2023-08-22 14:50:07,940][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-22 14:50:07,942][inference][INFO] - + Warming up the forward pass -[2023-08-22 14:50:07,974][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-22 14:50:13,021][inference][INFO] - + Forward pass latency: 3.75e-03 (s) -[2023-08-22 14:50:13,022][inference][INFO] - + Forward pass throughput: 267.00 (samples/s) -[2023-08-22 14:50:13,022][inference][INFO] - Saving inference results -[2023-08-22 14:50:13,032][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-22_14:10:30_fd56f7f0813d412c3e0848cbd6f94a23de2c07b7/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-22_14:10:30_fd56f7f0813d412c3e0848cbd6f94a23de2c07b7/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_14:10:30_fd56f7f0813d412c3e0848cbd6f94a23de2c07b7/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_14:10:30_fd56f7f0813d412c3e0848cbd6f94a23de2c07b7/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-22_14:10:30_fd56f7f0813d412c3e0848cbd6f94a23de2c07b7/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 4cb579e3b16d08ae97add244435a0e05f5d234cd..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_14:10:30_fd56f7f0813d412c3e0848cbd6f94a23de2c07b7/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
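The log timestamps above (tracking starts at 14:50:07,974 and the latency is reported at 14:50:13,021, about 5 s later) match `benchmark_duration: 5` with `warmup_runs: 10` in the configs: latency tracking runs for a fixed time budget rather than a fixed iteration count. A minimal sketch of that duration-based pattern; this is illustrative, not optimum-benchmark's actual code.

```python
import time

def measure_forward(forward, warmup_runs: int = 10, benchmark_duration: float = 5.0):
    """Warm up, then repeat the forward pass until the budget is spent."""
    for _ in range(warmup_runs):
        forward()
    latencies = []
    start = time.perf_counter()
    while time.perf_counter() - start < benchmark_duration:
        t0 = time.perf_counter()
        forward()
        latencies.append(time.perf_counter() - t0)
    return sum(latencies) / len(latencies)  # mean latency in seconds

# latency = measure_forward(lambda: model(**dummy_inputs))  # hypothetical model call
```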
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-22_14:10:30_fd56f7f0813d412c3e0848cbd6f94a23de2c07b7/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-22_14:10:30_fd56f7f0813d412c3e0848cbd6f94a23de2c07b7/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-08-22_14:10:30_fd56f7f0813d412c3e0848cbd6f94a23de2c07b7/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_14:10:30_fd56f7f0813d412c3e0848cbd6f94a23de2c07b7/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-22_14:10:30_fd56f7f0813d412c3e0848cbd6f94a23de2c07b7/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-22_14:10:30_fd56f7f0813d412c3e0848cbd6f94a23de2c07b7/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_14:10:30_fd56f7f0813d412c3e0848cbd6f94a23de2c07b7/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_14:10:30_fd56f7f0813d412c3e0848cbd6f94a23de2c07b7/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-22_14:10:30_fd56f7f0813d412c3e0848cbd6f94a23de2c07b7/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index dbf4d1263edfd78b550a8c633952edc83bf36e5a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_14:10:30_fd56f7f0813d412c3e0848cbd6f94a23de2c07b7/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.845696,0.00425,941.0 diff --git a/raw_results/2023-08-22_14:10:30_fd56f7f0813d412c3e0848cbd6f94a23de2c07b7/pytorch_bert_inference/1/main.log b/raw_results/2023-08-22_14:10:30_fd56f7f0813d412c3e0848cbd6f94a23de2c07b7/pytorch_bert_inference/1/main.log deleted file mode 100644 index 264e25e2a2f4df48a69e619a6926d2c1eec57c3c..0000000000000000000000000000000000000000 --- 
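The ~466-470 MB peak-memory figures recorded throughout these CSVs are process-level peaks: the tiny test models are dwarfed by the PyTorch runtime itself, which is why the number barely moves between batch sizes. One cheap, Linux-only way to approximate such a figure is sketched below; the tracker optimum-benchmark actually uses may differ.

```python
import resource

def peak_memory_mb() -> float:
    # ru_maxrss is reported in kilobytes on Linux.
    return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024

print(f"Forward pass peak memory: {peak_memory_mb():.2f} (MB)")
```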
a/raw_results/2023-08-22_14:10:30_fd56f7f0813d412c3e0848cbd6f94a23de2c07b7/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-22 14:50:13,411][benchmark][INFO] - Configuring inference benchmark -[2023-08-22 14:50:13,412][benchmark][INFO] - + Setting seed(42) -[2023-08-22 14:50:13,840][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-22 14:50:13,840][backend][INFO] - Configuring pytorch backend -[2023-08-22 14:50:13,840][backend][INFO] - + Checking initial device isolation -[2023-08-22 14:50:13,841][backend][INFO] - + Checking contineous device isolation -[2023-08-22 14:50:13,841][pytorch][INFO] - + Disabling gradients -[2023-08-22 14:50:13,841][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-22 14:50:13,956][pytorch][INFO] - + Turning on eval mode -[2023-08-22 14:50:13,957][inference][INFO] - Running inference benchmark -[2023-08-22 14:50:14,073][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-22 14:50:14,074][inference][INFO] - + Tracking forward pass peak memory -[2023-08-22 14:50:14,124][inference][INFO] - + Forward pass peak memory: 466.845696 (MB) -[2023-08-22 14:50:14,125][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-22 14:50:14,126][inference][INFO] - + Warming up the forward pass -[2023-08-22 14:50:14,170][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-22 14:50:19,210][inference][INFO] - + Forward pass latency: 4.25e-03 (s) -[2023-08-22 14:50:19,212][inference][INFO] - + Forward pass throughput: 941.00 (samples/s) -[2023-08-22 14:50:19,212][inference][INFO] - Saving inference results -[2023-08-22 14:50:19,220][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-22_14:10:30_fd56f7f0813d412c3e0848cbd6f94a23de2c07b7/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-22_14:10:30_fd56f7f0813d412c3e0848cbd6f94a23de2c07b7/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_14:10:30_fd56f7f0813d412c3e0848cbd6f94a23de2c07b7/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: 
hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_14:10:30_fd56f7f0813d412c3e0848cbd6f94a23de2c07b7/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-22_14:10:30_fd56f7f0813d412c3e0848cbd6f94a23de2c07b7/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index f371ede98141140d1b5571563df7de16be241a17..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_14:10:30_fd56f7f0813d412c3e0848cbd6f94a23de2c07b7/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
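
The hydra.yaml snapshots in these runs record exactly how each job was composed and launched: `mode: MULTIRUN`, the default BasicLauncher/BasicSweeper pair, and, for the bert experiments, a sweep over `benchmark.input_shapes.batch_size: 1,4` that produces the numbered job directories 0/ and 1/ (the gpt2 experiments run with `params: null` and an empty task list, so they only get job 0). A minimal sketch of such an entry point, assuming a hypothetical app.py and reusing the config names above:

    import hydra
    from omegaconf import DictConfig, OmegaConf

    @hydra.main(version_base="1.3", config_path="configs", config_name="bert_cpu_inference")
    def main(cfg: DictConfig) -> None:
        # Print the composed config without resolving interpolations such as
        # ${is_inference:${benchmark.name}}, which need the custom resolver
        # registered by optimum_benchmark before they can be evaluated.
        print(OmegaConf.to_yaml(cfg, resolve=False))

    if __name__ == "__main__":
        main()

Launched as `python app.py -m benchmark.input_shapes.batch_size=1,4`, Hydra writes one sweep subdirectory per override value, each holding the config.yaml, hydra.yaml, and overrides.yaml snapshots seen throughout this diff.
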
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-22_14:10:30_fd56f7f0813d412c3e0848cbd6f94a23de2c07b7/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-22_14:10:30_fd56f7f0813d412c3e0848cbd6f94a23de2c07b7/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-22_14:10:30_fd56f7f0813d412c3e0848cbd6f94a23de2c07b7/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_14:10:30_fd56f7f0813d412c3e0848cbd6f94a23de2c07b7/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-22_14:10:30_fd56f7f0813d412c3e0848cbd6f94a23de2c07b7/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-22_14:10:30_fd56f7f0813d412c3e0848cbd6f94a23de2c07b7/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_14:10:30_fd56f7f0813d412c3e0848cbd6f94a23de2c07b7/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_14:10:30_fd56f7f0813d412c3e0848cbd6f94a23de2c07b7/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-22_14:10:30_fd56f7f0813d412c3e0848cbd6f94a23de2c07b7/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 70bca7081a54bfdd850a316c09f4e85dd137ca6c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_14:10:30_fd56f7f0813d412c3e0848cbd6f94a23de2c07b7/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.22547199999997,0.00386,259.0,0.496,202.0 diff --git a/raw_results/2023-08-22_14:10:30_fd56f7f0813d412c3e0848cbd6f94a23de2c07b7/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-22_14:10:30_fd56f7f0813d412c3e0848cbd6f94a23de2c07b7/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 66e19e51f556bdf045416b79ce85516899d7fde5..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_14:10:30_fd56f7f0813d412c3e0848cbd6f94a23de2c07b7/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-22 14:50:24,544][benchmark][INFO] - Configuring inference benchmark -[2023-08-22 14:50:24,545][benchmark][INFO] - + Setting seed(42) -[2023-08-22 14:50:25,939][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-22 14:50:25,939][backend][INFO] - Configuring pytorch backend -[2023-08-22 14:50:25,940][backend][INFO] - + Checking initial device isolation -[2023-08-22 14:50:25,940][backend][INFO] - + Checking contineous device isolation -[2023-08-22 14:50:25,940][pytorch][INFO] - + Disabling gradients -[2023-08-22 14:50:25,940][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-22 14:50:26,575][pytorch][INFO] - + Turning on eval mode -[2023-08-22 14:50:26,575][inference][INFO] - Running inference benchmark -[2023-08-22 14:50:26,774][inference][INFO] - + Tracking forward pass peak memory -[2023-08-22 14:50:26,826][inference][INFO] - + Forward pass peak memory: 469.22547199999997 (MB) -[2023-08-22 14:50:26,828][inference][INFO] - + Warming up the 
forward pass -[2023-08-22 14:50:26,864][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-22 14:50:31,910][inference][INFO] - + Forward pass latency: 3.86e-03 (s) -[2023-08-22 14:50:31,912][inference][INFO] - + Forward pass throughput: 259.00 (samples/s) -[2023-08-22 14:50:31,913][inference][INFO] - + Warming up the generation pass -[2023-08-22 14:50:32,410][inference][INFO] - + Tracking generation latency and throughput -[2023-08-22 14:50:37,867][inference][INFO] - + Generation pass latency: 4.96e-01 (s) -[2023-08-22 14:50:37,868][inference][INFO] - + Generation pass throughput: 202.00 (tokens/s) -[2023-08-22 14:50:37,868][inference][INFO] - Saving inference results -[2023-08-22 14:50:37,881][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-22_15:08:13_5eeaef921f70acd68073d1066ccb09d7c6e6f475/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-22_15:08:13_5eeaef921f70acd68073d1066ccb09d7c6e6f475/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_15:08:13_5eeaef921f70acd68073d1066ccb09d7c6e6f475/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_15:08:13_5eeaef921f70acd68073d1066ccb09d7c6e6f475/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-22_15:08:13_5eeaef921f70acd68073d1066ccb09d7c6e6f475/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 88ec1ec93f3bdba5ffa41a54c693f8162d6221a6..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_15:08:13_5eeaef921f70acd68073d1066ccb09d7c6e6f475/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-22_15:08:13_5eeaef921f70acd68073d1066ccb09d7c6e6f475/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-22_15:08:13_5eeaef921f70acd68073d1066ccb09d7c6e6f475/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-22_15:08:13_5eeaef921f70acd68073d1066ccb09d7c6e6f475/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_15:08:13_5eeaef921f70acd68073d1066ccb09d7c6e6f475/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-22_15:08:13_5eeaef921f70acd68073d1066ccb09d7c6e6f475/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-22_15:08:13_5eeaef921f70acd68073d1066ccb09d7c6e6f475/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_15:08:13_5eeaef921f70acd68073d1066ccb09d7c6e6f475/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_15:08:13_5eeaef921f70acd68073d1066ccb09d7c6e6f475/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-22_15:08:13_5eeaef921f70acd68073d1066ccb09d7c6e6f475/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index c79ebc86fa6c51d5659d00c0a7a8ed6d5f9def73..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_15:08:13_5eeaef921f70acd68073d1066ccb09d7c6e6f475/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.0224,0.00332,301.0 diff --git a/raw_results/2023-08-22_15:08:13_5eeaef921f70acd68073d1066ccb09d7c6e6f475/pytorch_bert_inference/0/main.log b/raw_results/2023-08-22_15:08:13_5eeaef921f70acd68073d1066ccb09d7c6e6f475/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
9e683e10c20a5da7840fd80efdd50b9fcea7c3c6..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_15:08:13_5eeaef921f70acd68073d1066ccb09d7c6e6f475/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-22 16:49:53,474][benchmark][INFO] - Configuring inference benchmark -[2023-08-22 16:49:53,475][benchmark][INFO] - + Setting seed(42) -[2023-08-22 16:49:54,657][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-22 16:49:54,658][backend][INFO] - Configuring pytorch backend -[2023-08-22 16:49:54,658][backend][INFO] - + Checking initial device isolation -[2023-08-22 16:49:54,658][backend][INFO] - + Checking contineous device isolation -[2023-08-22 16:49:54,658][pytorch][INFO] - + Disabling gradients -[2023-08-22 16:49:54,658][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-22 16:49:55,242][pytorch][INFO] - + Turning on eval mode -[2023-08-22 16:49:55,242][inference][INFO] - Running inference benchmark -[2023-08-22 16:49:55,499][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-22 16:49:55,500][inference][INFO] - + Tracking forward pass peak memory -[2023-08-22 16:49:55,563][inference][INFO] - + Forward pass peak memory: 466.0224 (MB) -[2023-08-22 16:49:55,564][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-22 16:49:55,566][inference][INFO] - + Warming up the forward pass -[2023-08-22 16:49:55,603][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-22 16:50:00,653][inference][INFO] - + Forward pass latency: 3.32e-03 (s) -[2023-08-22 16:50:00,654][inference][INFO] - + Forward pass throughput: 301.00 (samples/s) -[2023-08-22 16:50:00,654][inference][INFO] - Saving inference results -[2023-08-22 16:50:00,664][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-22_15:08:13_5eeaef921f70acd68073d1066ccb09d7c6e6f475/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-22_15:08:13_5eeaef921f70acd68073d1066ccb09d7c6e6f475/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_15:08:13_5eeaef921f70acd68073d1066ccb09d7c6e6f475/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_15:08:13_5eeaef921f70acd68073d1066ccb09d7c6e6f475/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-22_15:08:13_5eeaef921f70acd68073d1066ccb09d7c6e6f475/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 315cfd4c3ad6fab86e9a1c0678b2f6e6bafd713d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_15:08:13_5eeaef921f70acd68073d1066ccb09d7c6e6f475/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
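
Each inference_results.csv deleted in this diff holds one row per run, and its columns appear to be related by simple arithmetic: forward throughput ≈ batch_size / forward latency (samples/s), and generation throughput ≈ batch_size × new_tokens / generation latency (tokens/s), up to the rounding applied to the stored latency. A quick sanity check against the numbers above, as a sketch only:

    def forward_throughput(batch_size: int, latency_s: float) -> float:
        """Samples processed per second by one forward pass over a batch."""
        return batch_size / latency_s

    def generate_throughput(batch_size: int, new_tokens: int, latency_s: float) -> float:
        """Tokens produced per second by one generation pass."""
        return batch_size * new_tokens / latency_s

    # pytorch_bert_inference/1 (batch_size=4): CSV reports 941.0 samples/s
    print(round(forward_throughput(4, 0.00425)))      # -> 941
    # pytorch_gpt2_inference/0 (batch_size=1, new_tokens=100): CSV reports 202.0 tokens/s
    print(round(generate_throughput(1, 100, 0.496)))  # -> 202
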
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-22_15:08:13_5eeaef921f70acd68073d1066ccb09d7c6e6f475/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-22_15:08:13_5eeaef921f70acd68073d1066ccb09d7c6e6f475/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-22_15:08:13_5eeaef921f70acd68073d1066ccb09d7c6e6f475/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_15:08:13_5eeaef921f70acd68073d1066ccb09d7c6e6f475/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-22_15:08:13_5eeaef921f70acd68073d1066ccb09d7c6e6f475/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-22_15:08:13_5eeaef921f70acd68073d1066ccb09d7c6e6f475/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_15:08:13_5eeaef921f70acd68073d1066ccb09d7c6e6f475/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_15:08:13_5eeaef921f70acd68073d1066ccb09d7c6e6f475/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-22_15:08:13_5eeaef921f70acd68073d1066ccb09d7c6e6f475/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 5723bde83f72269f6d7aaca72b7d8154ee341dae..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_15:08:13_5eeaef921f70acd68073d1066ccb09d7c6e6f475/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.144704,0.00369,1080.0 diff --git a/raw_results/2023-08-22_15:08:13_5eeaef921f70acd68073d1066ccb09d7c6e6f475/pytorch_bert_inference/1/main.log b/raw_results/2023-08-22_15:08:13_5eeaef921f70acd68073d1066ccb09d7c6e6f475/pytorch_bert_inference/1/main.log deleted file mode 100644 index 7c225a1c0351c8be68e54b616e477f1937cf071c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_15:08:13_5eeaef921f70acd68073d1066ccb09d7c6e6f475/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-22 16:50:01,072][benchmark][INFO] - Configuring inference benchmark -[2023-08-22 16:50:01,073][benchmark][INFO] - + Setting seed(42) -[2023-08-22 16:50:01,558][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-22 16:50:01,558][backend][INFO] - Configuring pytorch backend -[2023-08-22 16:50:01,558][backend][INFO] - + Checking initial device isolation -[2023-08-22 16:50:01,558][backend][INFO] - + Checking contineous device isolation -[2023-08-22 16:50:01,559][pytorch][INFO] - + Disabling gradients -[2023-08-22 16:50:01,559][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-22 16:50:01,670][pytorch][INFO] - + Turning on eval mode -[2023-08-22 16:50:01,671][inference][INFO] - Running inference benchmark -[2023-08-22 16:50:01,787][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-22 16:50:01,788][inference][INFO] - + Tracking forward pass 
peak memory -[2023-08-22 16:50:01,830][inference][INFO] - + Forward pass peak memory: 467.144704 (MB) -[2023-08-22 16:50:01,831][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-22 16:50:01,833][inference][INFO] - + Warming up the forward pass -[2023-08-22 16:50:01,869][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-22 16:50:06,914][inference][INFO] - + Forward pass latency: 3.69e-03 (s) -[2023-08-22 16:50:06,915][inference][INFO] - + Forward pass throughput: 1080.00 (samples/s) -[2023-08-22 16:50:06,915][inference][INFO] - Saving inference results -[2023-08-22 16:50:06,921][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-22_15:08:13_5eeaef921f70acd68073d1066ccb09d7c6e6f475/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-22_15:08:13_5eeaef921f70acd68073d1066ccb09d7c6e6f475/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_15:08:13_5eeaef921f70acd68073d1066ccb09d7c6e6f475/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_15:08:13_5eeaef921f70acd68073d1066ccb09d7c6e6f475/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-22_15:08:13_5eeaef921f70acd68073d1066ccb09d7c6e6f475/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index dd7af65e812ccf9a2930d728c3a1c9e15d7f09c5..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_15:08:13_5eeaef921f70acd68073d1066ccb09d7c6e6f475/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-22_15:08:13_5eeaef921f70acd68073d1066ccb09d7c6e6f475/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-22_15:08:13_5eeaef921f70acd68073d1066ccb09d7c6e6f475/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-22_15:08:13_5eeaef921f70acd68073d1066ccb09d7c6e6f475/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_15:08:13_5eeaef921f70acd68073d1066ccb09d7c6e6f475/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-22_15:08:13_5eeaef921f70acd68073d1066ccb09d7c6e6f475/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-22_15:08:13_5eeaef921f70acd68073d1066ccb09d7c6e6f475/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_15:08:13_5eeaef921f70acd68073d1066ccb09d7c6e6f475/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_15:08:13_5eeaef921f70acd68073d1066ccb09d7c6e6f475/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-22_15:08:13_5eeaef921f70acd68073d1066ccb09d7c6e6f475/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 4bf65bd3c0fcf64a1dae3b440ab1f90d35db7e35..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_15:08:13_5eeaef921f70acd68073d1066ccb09d7c6e6f475/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.471232,0.00347,288.0,0.493,203.0 diff --git a/raw_results/2023-08-22_15:08:13_5eeaef921f70acd68073d1066ccb09d7c6e6f475/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-22_15:08:13_5eeaef921f70acd68073d1066ccb09d7c6e6f475/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 6c24e70aa47da6e5a7dd8f3ab1fd9c8765a3b9fe..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-22_15:08:13_5eeaef921f70acd68073d1066ccb09d7c6e6f475/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-22 16:50:11,785][benchmark][INFO] - Configuring inference benchmark -[2023-08-22 16:50:11,786][benchmark][INFO] - + Setting seed(42) -[2023-08-22 16:50:13,202][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-22 16:50:13,202][backend][INFO] - Configuring pytorch backend -[2023-08-22 16:50:13,202][backend][INFO] - + Checking initial device isolation -[2023-08-22 16:50:13,203][backend][INFO] - + Checking contineous device isolation -[2023-08-22 16:50:13,203][pytorch][INFO] - + Disabling gradients -[2023-08-22 16:50:13,203][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-22 16:50:13,932][pytorch][INFO] - + Turning on eval mode -[2023-08-22 16:50:13,933][inference][INFO] - Running inference benchmark -[2023-08-22 16:50:14,119][inference][INFO] - + Tracking forward pass peak memory -[2023-08-22 16:50:14,167][inference][INFO] - + Forward pass peak memory: 469.471232 (MB) -[2023-08-22 16:50:14,168][inference][INFO] - + Warming up the forward pass -[2023-08-22 16:50:14,211][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-22 16:50:19,259][inference][INFO] - + Forward pass latency: 3.47e-03 (s) -[2023-08-22 16:50:19,261][inference][INFO] - + Forward pass throughput: 288.00 (samples/s) -[2023-08-22 16:50:19,261][inference][INFO] - + Warming up the generation pass -[2023-08-22 16:50:19,762][inference][INFO] - + Tracking generation latency and throughput -[2023-08-22 16:50:25,191][inference][INFO] - + Generation pass latency: 4.93e-01 (s) -[2023-08-22 16:50:25,191][inference][INFO] - + Generation pass throughput: 203.00 (tokens/s) -[2023-08-22 16:50:25,191][inference][INFO] - Saving inference results -[2023-08-22 16:50:25,201][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-22_15:21:01_908f853688c4d523780797f27f83af3c10418e92/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-22_15:21:01_908f853688c4d523780797f27f83af3c10418e92/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_15:21:01_908f853688c4d523780797f27f83af3c10418e92/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_15:21:01_908f853688c4d523780797f27f83af3c10418e92/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-22_15:21:01_908f853688c4d523780797f27f83af3c10418e92/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 32d60587520b793c85e76e6760203609d1c9f45c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_15:21:01_908f853688c4d523780797f27f83af3c10418e92/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
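
Because every run follows the same raw_results/<commit_date>_<commit_sha>/<experiment>/<job>/ layout, the per-run CSVs are straightforward to aggregate for cross-commit comparison. A hedged, pandas-based sketch (not part of this repository) of collecting them:

    from pathlib import Path

    import pandas as pd

    rows = []
    for csv_path in Path("raw_results").glob("*/*/*/inference_results.csv"):
        date_sha, experiment, job = csv_path.parts[1:4]
        df = pd.read_csv(csv_path, index_col=0)
        df["commit"] = date_sha.split("_")[-1]  # trailing component is the commit sha
        df["experiment"] = experiment
        df["job"] = job
        rows.append(df)

    results = pd.concat(rows, ignore_index=True)
    # Runs without a generation pass simply carry NaN in the generate.* columns.
    print(results.groupby(["experiment", "job"])["forward.latency(s)"].describe())
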
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-22_15:21:01_908f853688c4d523780797f27f83af3c10418e92/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-22_15:21:01_908f853688c4d523780797f27f83af3c10418e92/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-22_15:21:01_908f853688c4d523780797f27f83af3c10418e92/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_15:21:01_908f853688c4d523780797f27f83af3c10418e92/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-22_15:21:01_908f853688c4d523780797f27f83af3c10418e92/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-22_15:21:01_908f853688c4d523780797f27f83af3c10418e92/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_15:21:01_908f853688c4d523780797f27f83af3c10418e92/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_15:21:01_908f853688c4d523780797f27f83af3c10418e92/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-22_15:21:01_908f853688c4d523780797f27f83af3c10418e92/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 2dc541964574046537a7cfd257a99f85593e6449..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_15:21:01_908f853688c4d523780797f27f83af3c10418e92/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.542016,0.0032,312.0 diff --git a/raw_results/2023-08-22_15:21:01_908f853688c4d523780797f27f83af3c10418e92/pytorch_bert_inference/0/main.log b/raw_results/2023-08-22_15:21:01_908f853688c4d523780797f27f83af3c10418e92/pytorch_bert_inference/0/main.log deleted file mode 100644 index c451268fffd36a43b1ad83f48f1e85339f3e08b7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_15:21:01_908f853688c4d523780797f27f83af3c10418e92/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-22 16:51:31,565][benchmark][INFO] - Configuring inference benchmark -[2023-08-22 16:51:31,566][benchmark][INFO] - + Setting seed(42) -[2023-08-22 16:51:32,779][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-22 16:51:32,780][backend][INFO] - Configuring pytorch backend -[2023-08-22 16:51:32,780][backend][INFO] - + Checking initial device isolation -[2023-08-22 16:51:32,780][backend][INFO] - + Checking contineous device isolation -[2023-08-22 16:51:32,780][pytorch][INFO] - + Disabling gradients -[2023-08-22 16:51:32,780][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-22 16:51:33,378][pytorch][INFO] - + Turning on eval mode -[2023-08-22 16:51:33,379][inference][INFO] - Running inference benchmark -[2023-08-22 16:51:33,497][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-22 16:51:33,499][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-22 16:51:33,559][inference][INFO] - + Forward pass peak memory: 467.542016 (MB) -[2023-08-22 16:51:33,560][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-22 16:51:33,562][inference][INFO] - + Warming up the forward pass -[2023-08-22 16:51:33,606][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-22 16:51:38,657][inference][INFO] - + Forward pass latency: 3.20e-03 (s) -[2023-08-22 16:51:38,658][inference][INFO] - + Forward pass throughput: 312.00 (samples/s) -[2023-08-22 16:51:38,658][inference][INFO] - Saving inference results -[2023-08-22 16:51:38,668][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-22_15:21:01_908f853688c4d523780797f27f83af3c10418e92/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-22_15:21:01_908f853688c4d523780797f27f83af3c10418e92/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_15:21:01_908f853688c4d523780797f27f83af3c10418e92/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_15:21:01_908f853688c4d523780797f27f83af3c10418e92/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-22_15:21:01_908f853688c4d523780797f27f83af3c10418e92/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 3782cbd304e436672882aede33054773b432b47b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_15:21:01_908f853688c4d523780797f27f83af3c10418e92/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-22_15:21:01_908f853688c4d523780797f27f83af3c10418e92/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-22_15:21:01_908f853688c4d523780797f27f83af3c10418e92/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-08-22_15:21:01_908f853688c4d523780797f27f83af3c10418e92/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_15:21:01_908f853688c4d523780797f27f83af3c10418e92/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-22_15:21:01_908f853688c4d523780797f27f83af3c10418e92/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-22_15:21:01_908f853688c4d523780797f27f83af3c10418e92/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_15:21:01_908f853688c4d523780797f27f83af3c10418e92/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_15:21:01_908f853688c4d523780797f27f83af3c10418e92/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-22_15:21:01_908f853688c4d523780797f27f83af3c10418e92/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 353bf3a014f0d76afbe0e31646ae7c8e587f15f0..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_15:21:01_908f853688c4d523780797f27f83af3c10418e92/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.635648,0.00509,786.0 diff --git a/raw_results/2023-08-22_15:21:01_908f853688c4d523780797f27f83af3c10418e92/pytorch_bert_inference/1/main.log b/raw_results/2023-08-22_15:21:01_908f853688c4d523780797f27f83af3c10418e92/pytorch_bert_inference/1/main.log deleted file mode 100644 index 7b8861eed920e61743969dd8139d5f4fa497d0c6..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-22_15:21:01_908f853688c4d523780797f27f83af3c10418e92/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-22 16:51:39,033][benchmark][INFO] - Configuring inference benchmark -[2023-08-22 16:51:39,034][benchmark][INFO] - + Setting seed(42) -[2023-08-22 16:51:39,562][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-22 16:51:39,562][backend][INFO] - Configuring pytorch backend -[2023-08-22 16:51:39,562][backend][INFO] - + Checking initial device isolation -[2023-08-22 16:51:39,563][backend][INFO] - + Checking contineous device isolation -[2023-08-22 16:51:39,563][pytorch][INFO] - + Disabling gradients -[2023-08-22 16:51:39,563][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-22 16:51:39,675][pytorch][INFO] - + Turning on eval mode -[2023-08-22 16:51:39,675][inference][INFO] - Running inference benchmark -[2023-08-22 16:51:39,798][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-22 16:51:39,799][inference][INFO] - + Tracking forward pass peak memory -[2023-08-22 16:51:39,840][inference][INFO] - + Forward pass peak memory: 468.635648 (MB) -[2023-08-22 16:51:39,841][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-22 16:51:39,843][inference][INFO] - + Warming up the forward pass -[2023-08-22 16:51:39,885][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-22 16:51:44,928][inference][INFO] - + Forward pass latency: 5.09e-03 (s) -[2023-08-22 16:51:44,929][inference][INFO] - + Forward pass throughput: 786.00 (samples/s) -[2023-08-22 16:51:44,930][inference][INFO] - Saving inference results -[2023-08-22 16:51:44,938][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-22_15:21:01_908f853688c4d523780797f27f83af3c10418e92/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-22_15:21:01_908f853688c4d523780797f27f83af3c10418e92/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_15:21:01_908f853688c4d523780797f27f83af3c10418e92/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: 
hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_15:21:01_908f853688c4d523780797f27f83af3c10418e92/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-22_15:21:01_908f853688c4d523780797f27f83af3c10418e92/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 6af671714586a196cfb95e6c1b3ff3acef30fc53..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_15:21:01_908f853688c4d523780797f27f83af3c10418e92/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
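A quick sanity check on the BERT CSVs above: the forward throughput the harness reports appears consistent with batch_size / forward latency. A minimal sketch of that arithmetic; the input values are taken verbatim from the inference_results.csv hunks in this changeset, and the exact rounding the harness applies is an assumption:

    # Recompute forward throughput from the pytorch_bert_inference CSVs above
    # (commit 2023-08-22_15:21:01). Throughput ~= batch_size / latency.
    runs = [
        # (batch_size, forward.latency(s), reported forward.throughput(samples/s))
        (1, 0.0032, 312.0),   # pytorch_bert_inference/0
        (4, 0.00509, 786.0),  # pytorch_bert_inference/1
    ]
    for batch_size, latency_s, reported in runs:
        derived = batch_size / latency_s  # 312.5 and 785.9 samples/s
        print(f"derived={derived:.1f} samples/s, reported={reported}")

Both derived values land on the CSV figures once rounded, which suggests the samples/s column is computed from the latency column rather than measured independently.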
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-22_15:21:01_908f853688c4d523780797f27f83af3c10418e92/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-22_15:21:01_908f853688c4d523780797f27f83af3c10418e92/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-22_15:21:01_908f853688c4d523780797f27f83af3c10418e92/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_15:21:01_908f853688c4d523780797f27f83af3c10418e92/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-22_15:21:01_908f853688c4d523780797f27f83af3c10418e92/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-22_15:21:01_908f853688c4d523780797f27f83af3c10418e92/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_15:21:01_908f853688c4d523780797f27f83af3c10418e92/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_15:21:01_908f853688c4d523780797f27f83af3c10418e92/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-22_15:21:01_908f853688c4d523780797f27f83af3c10418e92/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index ea439be7731cfbe7ff93072ed0b885d6b16f1e8b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_15:21:01_908f853688c4d523780797f27f83af3c10418e92/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.05753599999997,0.00304,329.0,0.486,206.0 diff --git a/raw_results/2023-08-22_15:21:01_908f853688c4d523780797f27f83af3c10418e92/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-22_15:21:01_908f853688c4d523780797f27f83af3c10418e92/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index c0b2e373432cd548385d7d848704ffd99b9dcc3d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_15:21:01_908f853688c4d523780797f27f83af3c10418e92/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-22 16:51:49,940][benchmark][INFO] - Configuring inference benchmark -[2023-08-22 16:51:49,941][benchmark][INFO] - + Setting seed(42) -[2023-08-22 16:51:51,328][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-22 16:51:51,328][backend][INFO] - Configuring pytorch backend -[2023-08-22 16:51:51,328][backend][INFO] - + Checking initial device isolation -[2023-08-22 16:51:51,328][backend][INFO] - + Checking contineous device isolation -[2023-08-22 16:51:51,328][pytorch][INFO] - + Disabling gradients -[2023-08-22 16:51:51,329][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-22 16:51:51,935][pytorch][INFO] - + Turning on eval mode -[2023-08-22 16:51:51,936][inference][INFO] - Running inference benchmark -[2023-08-22 16:51:52,124][inference][INFO] - + Tracking forward pass peak memory -[2023-08-22 16:51:52,172][inference][INFO] - + Forward pass peak memory: 469.05753599999997 (MB) -[2023-08-22 16:51:52,173][inference][INFO] - + Warming up the 
forward pass -[2023-08-22 16:51:52,209][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-22 16:51:57,260][inference][INFO] - + Forward pass latency: 3.04e-03 (s) -[2023-08-22 16:51:57,261][inference][INFO] - + Forward pass throughput: 329.00 (samples/s) -[2023-08-22 16:51:57,262][inference][INFO] - + Warming up the generation pass -[2023-08-22 16:51:57,755][inference][INFO] - + Tracking generation latency and throughput -[2023-08-22 16:52:03,105][inference][INFO] - + Generation pass latency: 4.86e-01 (s) -[2023-08-22 16:52:03,106][inference][INFO] - + Generation pass throughput: 206.00 (tokens/s) -[2023-08-22 16:52:03,107][inference][INFO] - Saving inference results -[2023-08-22 16:52:03,117][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-22_18:28:38_977b2f05d5697f33e51111e4834a127a9a76349f/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-22_18:28:38_977b2f05d5697f33e51111e4834a127a9a76349f/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_18:28:38_977b2f05d5697f33e51111e4834a127a9a76349f/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_18:28:38_977b2f05d5697f33e51111e4834a127a9a76349f/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-22_18:28:38_977b2f05d5697f33e51111e4834a127a9a76349f/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 1d5104240c2665de320c1bf764db76620ab36731..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_18:28:38_977b2f05d5697f33e51111e4834a127a9a76349f/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-22_18:28:38_977b2f05d5697f33e51111e4834a127a9a76349f/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-22_18:28:38_977b2f05d5697f33e51111e4834a127a9a76349f/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-22_18:28:38_977b2f05d5697f33e51111e4834a127a9a76349f/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_18:28:38_977b2f05d5697f33e51111e4834a127a9a76349f/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-22_18:28:38_977b2f05d5697f33e51111e4834a127a9a76349f/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-22_18:28:38_977b2f05d5697f33e51111e4834a127a9a76349f/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_18:28:38_977b2f05d5697f33e51111e4834a127a9a76349f/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_18:28:38_977b2f05d5697f33e51111e4834a127a9a76349f/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-22_18:28:38_977b2f05d5697f33e51111e4834a127a9a76349f/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index c498c511a893cf06efe02edf371f7eef83a2a733..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_18:28:38_977b2f05d5697f33e51111e4834a127a9a76349f/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,465.903616,0.00315,317.0 diff --git a/raw_results/2023-08-22_18:28:38_977b2f05d5697f33e51111e4834a127a9a76349f/pytorch_bert_inference/0/main.log b/raw_results/2023-08-22_18:28:38_977b2f05d5697f33e51111e4834a127a9a76349f/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
015bdd865f01908fb8a647e830a562258b683f26..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_18:28:38_977b2f05d5697f33e51111e4834a127a9a76349f/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-22 18:50:01,325][benchmark][INFO] - Configuring inference benchmark -[2023-08-22 18:50:01,326][benchmark][INFO] - + Setting seed(42) -[2023-08-22 18:50:02,560][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-22 18:50:02,561][backend][INFO] - Configuring pytorch backend -[2023-08-22 18:50:02,561][backend][INFO] - + Checking initial device isolation -[2023-08-22 18:50:02,561][backend][INFO] - + Checking contineous device isolation -[2023-08-22 18:50:02,561][pytorch][INFO] - + Disabling gradients -[2023-08-22 18:50:02,561][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-22 18:50:03,597][pytorch][INFO] - + Turning on eval mode -[2023-08-22 18:50:03,598][inference][INFO] - Running inference benchmark -[2023-08-22 18:50:03,720][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-22 18:50:03,721][inference][INFO] - + Tracking forward pass peak memory -[2023-08-22 18:50:03,778][inference][INFO] - + Forward pass peak memory: 465.903616 (MB) -[2023-08-22 18:50:03,779][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-22 18:50:03,781][inference][INFO] - + Warming up the forward pass -[2023-08-22 18:50:03,818][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-22 18:50:08,869][inference][INFO] - + Forward pass latency: 3.15e-03 (s) -[2023-08-22 18:50:08,870][inference][INFO] - + Forward pass throughput: 317.00 (samples/s) -[2023-08-22 18:50:08,871][inference][INFO] - Saving inference results -[2023-08-22 18:50:08,881][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-22_18:28:38_977b2f05d5697f33e51111e4834a127a9a76349f/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-22_18:28:38_977b2f05d5697f33e51111e4834a127a9a76349f/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_18:28:38_977b2f05d5697f33e51111e4834a127a9a76349f/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_18:28:38_977b2f05d5697f33e51111e4834a127a9a76349f/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-22_18:28:38_977b2f05d5697f33e51111e4834a127a9a76349f/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index c23ccf8bb61651674a79825a54a42e53c11ed18f..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_18:28:38_977b2f05d5697f33e51111e4834a127a9a76349f/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
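The generation metrics above follow the same pattern: every config in this changeset sets new_tokens: 100, and the GPT-2 generate throughput (206 tokens/s at a 0.486 s generation latency) matches new_tokens / generate.latency. A small sketch under that assumption:

    # Derive generate throughput for pytorch_gpt2_inference/0
    # (commit 2023-08-22_15:21:01), using values from the hunks above.
    new_tokens = 100             # from the benchmark config
    generate_latency_s = 0.486   # generate.latency(s) from inference_results.csv
    derived = new_tokens / generate_latency_s  # ~205.8 tokens/s
    print(f"derived={derived:.1f} tokens/s, reported=206.0")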
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-22_18:28:38_977b2f05d5697f33e51111e4834a127a9a76349f/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-22_18:28:38_977b2f05d5697f33e51111e4834a127a9a76349f/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-22_18:28:38_977b2f05d5697f33e51111e4834a127a9a76349f/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_18:28:38_977b2f05d5697f33e51111e4834a127a9a76349f/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-22_18:28:38_977b2f05d5697f33e51111e4834a127a9a76349f/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-22_18:28:38_977b2f05d5697f33e51111e4834a127a9a76349f/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_18:28:38_977b2f05d5697f33e51111e4834a127a9a76349f/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_18:28:38_977b2f05d5697f33e51111e4834a127a9a76349f/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-22_18:28:38_977b2f05d5697f33e51111e4834a127a9a76349f/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index ec51f72197c00e14e63ffbcedd0193593f692116..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_18:28:38_977b2f05d5697f33e51111e4834a127a9a76349f/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.00134399999996,0.00392,1020.0 diff --git a/raw_results/2023-08-22_18:28:38_977b2f05d5697f33e51111e4834a127a9a76349f/pytorch_bert_inference/1/main.log b/raw_results/2023-08-22_18:28:38_977b2f05d5697f33e51111e4834a127a9a76349f/pytorch_bert_inference/1/main.log deleted file mode 100644 index 6a636fc87e42d5396c4f26225a0924ece5e53db0..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_18:28:38_977b2f05d5697f33e51111e4834a127a9a76349f/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-22 18:50:09,255][benchmark][INFO] - Configuring inference benchmark -[2023-08-22 18:50:09,256][benchmark][INFO] - + Setting seed(42) -[2023-08-22 18:50:09,677][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-22 18:50:09,677][backend][INFO] - Configuring pytorch backend -[2023-08-22 18:50:09,677][backend][INFO] - + Checking initial device isolation -[2023-08-22 18:50:09,678][backend][INFO] - + Checking contineous device isolation -[2023-08-22 18:50:09,678][pytorch][INFO] - + Disabling gradients -[2023-08-22 18:50:09,678][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-22 18:50:09,795][pytorch][INFO] - + Turning on eval mode -[2023-08-22 18:50:09,795][inference][INFO] - Running inference benchmark -[2023-08-22 18:50:09,912][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-22 18:50:09,913][inference][INFO] - + Tracking forward 
pass peak memory -[2023-08-22 18:50:09,950][inference][INFO] - + Forward pass peak memory: 467.00134399999996 (MB) -[2023-08-22 18:50:09,951][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-22 18:50:09,953][inference][INFO] - + Warming up the forward pass -[2023-08-22 18:50:09,987][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-22 18:50:15,033][inference][INFO] - + Forward pass latency: 3.92e-03 (s) -[2023-08-22 18:50:15,034][inference][INFO] - + Forward pass throughput: 1020.00 (samples/s) -[2023-08-22 18:50:15,035][inference][INFO] - Saving inference results -[2023-08-22 18:50:15,044][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-22_18:28:38_977b2f05d5697f33e51111e4834a127a9a76349f/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-22_18:28:38_977b2f05d5697f33e51111e4834a127a9a76349f/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_18:28:38_977b2f05d5697f33e51111e4834a127a9a76349f/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_18:28:38_977b2f05d5697f33e51111e4834a127a9a76349f/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-22_18:28:38_977b2f05d5697f33e51111e4834a127a9a76349f/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index ac5b78c55e6365dd62b53fe6b66f8b2806e36e4a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_18:28:38_977b2f05d5697f33e51111e4834a127a9a76349f/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - 
launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-22_18:28:38_977b2f05d5697f33e51111e4834a127a9a76349f/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-22_18:28:38_977b2f05d5697f33e51111e4834a127a9a76349f/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-22_18:28:38_977b2f05d5697f33e51111e4834a127a9a76349f/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_18:28:38_977b2f05d5697f33e51111e4834a127a9a76349f/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-22_18:28:38_977b2f05d5697f33e51111e4834a127a9a76349f/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-22_18:28:38_977b2f05d5697f33e51111e4834a127a9a76349f/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_18:28:38_977b2f05d5697f33e51111e4834a127a9a76349f/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_18:28:38_977b2f05d5697f33e51111e4834a127a9a76349f/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-22_18:28:38_977b2f05d5697f33e51111e4834a127a9a76349f/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index c50d283f918f3d18b483e0dc9be4d4036c181bac..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_18:28:38_977b2f05d5697f33e51111e4834a127a9a76349f/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.04934399999996,0.00317,315.0,0.485,206.0 diff --git a/raw_results/2023-08-22_18:28:38_977b2f05d5697f33e51111e4834a127a9a76349f/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-22_18:28:38_977b2f05d5697f33e51111e4834a127a9a76349f/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 8a0dc8a2bb18c9340ff433a9b340b550c3481e81..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-22_18:28:38_977b2f05d5697f33e51111e4834a127a9a76349f/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-22 18:50:19,941][benchmark][INFO] - Configuring inference benchmark -[2023-08-22 18:50:19,942][benchmark][INFO] - + Setting seed(42) -[2023-08-22 18:50:21,320][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-22 18:50:21,320][backend][INFO] - Configuring pytorch backend -[2023-08-22 18:50:21,320][backend][INFO] - + Checking initial device isolation -[2023-08-22 18:50:21,320][backend][INFO] - + Checking contineous device isolation -[2023-08-22 18:50:21,320][pytorch][INFO] - + Disabling gradients -[2023-08-22 18:50:21,321][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-22 18:50:21,963][pytorch][INFO] - + Turning on eval mode -[2023-08-22 18:50:21,963][inference][INFO] - Running inference benchmark -[2023-08-22 18:50:22,155][inference][INFO] - + Tracking forward pass peak memory -[2023-08-22 18:50:22,202][inference][INFO] - + Forward pass peak memory: 469.04934399999996 (MB) -[2023-08-22 18:50:22,204][inference][INFO] - + Warming up the forward pass -[2023-08-22 18:50:22,240][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-22 18:50:27,291][inference][INFO] - + Forward pass latency: 3.17e-03 (s) -[2023-08-22 18:50:27,293][inference][INFO] - + Forward pass throughput: 315.00 (samples/s) -[2023-08-22 18:50:27,293][inference][INFO] - + Warming up the generation pass -[2023-08-22 18:50:27,805][inference][INFO] - + Tracking generation latency and throughput -[2023-08-22 18:50:33,145][inference][INFO] - + Generation pass latency: 4.85e-01 (s) -[2023-08-22 18:50:33,146][inference][INFO] - + Generation pass throughput: 206.00 (tokens/s) -[2023-08-22 18:50:33,146][inference][INFO] - Saving inference results -[2023-08-22 18:50:33,158][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-22_20:58:55_40a0cabd93f86a7c09406159ad03a3804c2940da/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-22_20:58:55_40a0cabd93f86a7c09406159ad03a3804c2940da/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_20:58:55_40a0cabd93f86a7c09406159ad03a3804c2940da/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_20:58:55_40a0cabd93f86a7c09406159ad03a3804c2940da/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-22_20:58:55_40a0cabd93f86a7c09406159ad03a3804c2940da/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 308a70368d2c6ce08b70ed3b6f8c7ea0f08de945..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_20:58:55_40a0cabd93f86a7c09406159ad03a3804c2940da/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
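Aside: the sweeper block above (`benchmark.input_shapes.batch_size: 1,4`) is what fans each experiment out into the numbered job directories `0/` and `1/`, each carrying its own `.config/overrides.yaml`. A minimal sketch, not part of the benchmark tooling, that walks the `raw_results/` tree deleted in this diff and prints which override each sweep job ran with; it assumes the `<commit_date>_<commit_sha>/<experiment_name>/<job_num>/` layout visible here and that the script runs from the repository root.

```python
# Sketch: map each sweep job directory to the Hydra override it ran with.
# Layout assumption: raw_results/<commit_dir>/<experiment>/<job>/.config/overrides.yaml
from pathlib import Path

for ov in sorted(Path("raw_results").glob("*/*/*/.config/overrides.yaml")):
    commit_dir, experiment, job = ov.parts[-5], ov.parts[-4], ov.parts[-3]
    print(commit_dir, experiment, f"job {job}:", ov.read_text().strip())
```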
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-22_20:58:55_40a0cabd93f86a7c09406159ad03a3804c2940da/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-22_20:58:55_40a0cabd93f86a7c09406159ad03a3804c2940da/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-22_20:58:55_40a0cabd93f86a7c09406159ad03a3804c2940da/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_20:58:55_40a0cabd93f86a7c09406159ad03a3804c2940da/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-22_20:58:55_40a0cabd93f86a7c09406159ad03a3804c2940da/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-22_20:58:55_40a0cabd93f86a7c09406159ad03a3804c2940da/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_20:58:55_40a0cabd93f86a7c09406159ad03a3804c2940da/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_20:58:55_40a0cabd93f86a7c09406159ad03a3804c2940da/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-22_20:58:55_40a0cabd93f86a7c09406159ad03a3804c2940da/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 5d751ebc856925564de0639da9ef55e41b8385ea..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_20:58:55_40a0cabd93f86a7c09406159ad03a3804c2940da/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.55487999999997,0.00318,314.0 diff --git a/raw_results/2023-08-22_20:58:55_40a0cabd93f86a7c09406159ad03a3804c2940da/pytorch_bert_inference/0/main.log b/raw_results/2023-08-22_20:58:55_40a0cabd93f86a7c09406159ad03a3804c2940da/pytorch_bert_inference/0/main.log deleted file mode 100644 index 6c3c76f7e39480fe2f423b0c811448d847349eb9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_20:58:55_40a0cabd93f86a7c09406159ad03a3804c2940da/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-22 22:49:56,192][benchmark][INFO] - Configuring inference benchmark -[2023-08-22 22:49:56,193][benchmark][INFO] - + Setting seed(42) -[2023-08-22 22:49:57,433][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-22 22:49:57,434][backend][INFO] - Configuring pytorch backend -[2023-08-22 22:49:57,434][backend][INFO] - + Checking initial device isolation -[2023-08-22 22:49:57,434][backend][INFO] - + Checking contineous device isolation -[2023-08-22 22:49:57,434][pytorch][INFO] - + Disabling gradients -[2023-08-22 22:49:57,434][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-22 22:49:58,053][pytorch][INFO] - + Turning on eval mode -[2023-08-22 22:49:58,054][inference][INFO] - Running inference benchmark -[2023-08-22 22:49:58,171][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-22 22:49:58,172][inference][INFO] - + Tracking forward 
pass peak memory -[2023-08-22 22:49:58,236][inference][INFO] - + Forward pass peak memory: 466.55487999999997 (MB) -[2023-08-22 22:49:58,237][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-22 22:49:58,240][inference][INFO] - + Warming up the forward pass -[2023-08-22 22:49:58,286][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-22 22:50:03,334][inference][INFO] - + Forward pass latency: 3.18e-03 (s) -[2023-08-22 22:50:03,335][inference][INFO] - + Forward pass throughput: 314.00 (samples/s) -[2023-08-22 22:50:03,336][inference][INFO] - Saving inference results -[2023-08-22 22:50:03,346][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-22_20:58:55_40a0cabd93f86a7c09406159ad03a3804c2940da/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-22_20:58:55_40a0cabd93f86a7c09406159ad03a3804c2940da/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_20:58:55_40a0cabd93f86a7c09406159ad03a3804c2940da/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_20:58:55_40a0cabd93f86a7c09406159ad03a3804c2940da/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-22_20:58:55_40a0cabd93f86a7c09406159ad03a3804c2940da/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 3a73bbeaf07b1c2f4fb6883b5c2d5d180f6d8a18..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_20:58:55_40a0cabd93f86a7c09406159ad03a3804c2940da/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} 
- launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-22_20:58:55_40a0cabd93f86a7c09406159ad03a3804c2940da/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-22_20:58:55_40a0cabd93f86a7c09406159ad03a3804c2940da/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-22_20:58:55_40a0cabd93f86a7c09406159ad03a3804c2940da/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_20:58:55_40a0cabd93f86a7c09406159ad03a3804c2940da/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-22_20:58:55_40a0cabd93f86a7c09406159ad03a3804c2940da/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-22_20:58:55_40a0cabd93f86a7c09406159ad03a3804c2940da/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_20:58:55_40a0cabd93f86a7c09406159ad03a3804c2940da/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_20:58:55_40a0cabd93f86a7c09406159ad03a3804c2940da/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-22_20:58:55_40a0cabd93f86a7c09406159ad03a3804c2940da/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index e100fda2cec8a190b4ea1e79f05d51e2b6f0baca..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_20:58:55_40a0cabd93f86a7c09406159ad03a3804c2940da/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.68947199999997,0.00369,1080.0 diff --git a/raw_results/2023-08-22_20:58:55_40a0cabd93f86a7c09406159ad03a3804c2940da/pytorch_bert_inference/1/main.log b/raw_results/2023-08-22_20:58:55_40a0cabd93f86a7c09406159ad03a3804c2940da/pytorch_bert_inference/1/main.log deleted file mode 100644 index 
6f7961f675382a1fe33c8524a682fe1d207d80ab..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_20:58:55_40a0cabd93f86a7c09406159ad03a3804c2940da/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-22 22:50:03,731][benchmark][INFO] - Configuring inference benchmark -[2023-08-22 22:50:03,732][benchmark][INFO] - + Setting seed(42) -[2023-08-22 22:50:04,251][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-22 22:50:04,251][backend][INFO] - Configuring pytorch backend -[2023-08-22 22:50:04,251][backend][INFO] - + Checking initial device isolation -[2023-08-22 22:50:04,252][backend][INFO] - + Checking contineous device isolation -[2023-08-22 22:50:04,252][pytorch][INFO] - + Disabling gradients -[2023-08-22 22:50:04,252][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-22 22:50:04,371][pytorch][INFO] - + Turning on eval mode -[2023-08-22 22:50:04,372][inference][INFO] - Running inference benchmark -[2023-08-22 22:50:04,487][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-22 22:50:04,488][inference][INFO] - + Tracking forward pass peak memory -[2023-08-22 22:50:04,528][inference][INFO] - + Forward pass peak memory: 467.68947199999997 (MB) -[2023-08-22 22:50:04,529][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-22 22:50:04,531][inference][INFO] - + Warming up the forward pass -[2023-08-22 22:50:04,565][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-22 22:50:09,613][inference][INFO] - + Forward pass latency: 3.69e-03 (s) -[2023-08-22 22:50:09,614][inference][INFO] - + Forward pass throughput: 1080.00 (samples/s) -[2023-08-22 22:50:09,614][inference][INFO] - Saving inference results -[2023-08-22 22:50:09,621][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-22_20:58:55_40a0cabd93f86a7c09406159ad03a3804c2940da/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-22_20:58:55_40a0cabd93f86a7c09406159ad03a3804c2940da/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_20:58:55_40a0cabd93f86a7c09406159ad03a3804c2940da/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_20:58:55_40a0cabd93f86a7c09406159ad03a3804c2940da/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-22_20:58:55_40a0cabd93f86a7c09406159ad03a3804c2940da/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 60f70d4e15b0bcce2b539aef755474c9fe493422..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_20:58:55_40a0cabd93f86a7c09406159ad03a3804c2940da/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
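Aside: note the difference between the two config files in each job directory. `.config/config.yaml` stores Hydra interpolations such as `${is_inference:${benchmark.name}}` unresolved, while `hydra_config.yaml` records the values they resolved to for that run (`disable_grad: true`, `eval_mode: true`). A minimal sketch, assuming PyYAML is installed and the paths from this diff still exist on disk:

```python
# Sketch: compare the unresolved config with the resolved one for a single run.
# The run path below is one of the directories being deleted in this diff.
import yaml

run = ("raw_results/2023-08-22_20:58:55_40a0cabd93f86a7c09406159ad03a3804c2940da"
       "/pytorch_gpt2_inference/0")
with open(f"{run}/.config/config.yaml") as f:
    raw = yaml.safe_load(f)
with open(f"{run}/hydra_config.yaml") as f:
    resolved = yaml.safe_load(f)

print(raw["backend"]["disable_grad"])       # ${is_inference:${benchmark.name}}
print(resolved["backend"]["disable_grad"])  # True
```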
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-22_20:58:55_40a0cabd93f86a7c09406159ad03a3804c2940da/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-22_20:58:55_40a0cabd93f86a7c09406159ad03a3804c2940da/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-22_20:58:55_40a0cabd93f86a7c09406159ad03a3804c2940da/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_20:58:55_40a0cabd93f86a7c09406159ad03a3804c2940da/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-22_20:58:55_40a0cabd93f86a7c09406159ad03a3804c2940da/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-22_20:58:55_40a0cabd93f86a7c09406159ad03a3804c2940da/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_20:58:55_40a0cabd93f86a7c09406159ad03a3804c2940da/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-22_20:58:55_40a0cabd93f86a7c09406159ad03a3804c2940da/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-22_20:58:55_40a0cabd93f86a7c09406159ad03a3804c2940da/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 339debfbbe43282fd3da0e71265e4f24304a5c3a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_20:58:55_40a0cabd93f86a7c09406159ad03a3804c2940da/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.573632,0.00431,232.0,0.515,194.0 diff --git a/raw_results/2023-08-22_20:58:55_40a0cabd93f86a7c09406159ad03a3804c2940da/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-22_20:58:55_40a0cabd93f86a7c09406159ad03a3804c2940da/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 76b758972272da1d28b10d29987a35a4ffaf48a9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-22_20:58:55_40a0cabd93f86a7c09406159ad03a3804c2940da/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-22 22:50:14,712][benchmark][INFO] - Configuring inference benchmark -[2023-08-22 22:50:14,712][benchmark][INFO] - + Setting seed(42) -[2023-08-22 22:50:16,169][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-22 22:50:16,169][backend][INFO] - Configuring pytorch backend -[2023-08-22 22:50:16,170][backend][INFO] - + Checking initial device isolation -[2023-08-22 22:50:16,170][backend][INFO] - + Checking contineous device isolation -[2023-08-22 22:50:16,170][pytorch][INFO] - + Disabling gradients -[2023-08-22 22:50:16,170][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-22 22:50:16,841][pytorch][INFO] - + Turning on eval mode -[2023-08-22 22:50:16,842][inference][INFO] - Running inference benchmark -[2023-08-22 22:50:17,042][inference][INFO] - + Tracking forward pass peak memory -[2023-08-22 22:50:17,087][inference][INFO] - + Forward pass peak memory: 469.573632 (MB) -[2023-08-22 22:50:17,088][inference][INFO] - + Warming up the forward pass 
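Aside: the throughput columns in `inference_results.csv` appear to be derived from the measured latencies and the configured input shapes (`batch_size`, and `new_tokens` for generation). This reconstruction is an assumption, not confirmed by the tooling, and the recorded figures look rounded to about three significant digits; checking it against the gpt2 run above:

```python
# Hypothetical reconstruction of the CSV throughput columns from its latency
# columns, using batch_size=1 and new_tokens=100 from the hydra_config above.
batch_size, new_tokens = 1, 100
forward_latency_s, generate_latency_s = 0.00431, 0.515  # values from the CSV

forward_throughput = batch_size / forward_latency_s                  # ~232 samples/s
generate_throughput = batch_size * new_tokens / generate_latency_s  # ~194 tokens/s
print(round(forward_throughput), round(generate_throughput))  # 232 194
```

Both reconstructed values match the CSV row `0,469.573632,0.00431,232.0,0.515,194.0`, which supports (but does not prove) this derivation.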
-[2023-08-22 22:50:17,124][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-22 22:50:22,168][inference][INFO] - + Forward pass latency: 4.31e-03 (s) -[2023-08-22 22:50:22,171][inference][INFO] - + Forward pass throughput: 232.00 (samples/s) -[2023-08-22 22:50:22,172][inference][INFO] - + Warming up the generation pass -[2023-08-22 22:50:22,774][inference][INFO] - + Tracking generation latency and throughput -[2023-08-22 22:50:27,926][inference][INFO] - + Generation pass latency: 5.15e-01 (s) -[2023-08-22 22:50:27,926][inference][INFO] - + Generation pass throughput: 194.00 (tokens/s) -[2023-08-22 22:50:27,927][inference][INFO] - Saving inference results -[2023-08-22 22:50:27,938][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-23_00:14:54_57943630e24651e6d954b912e7fcdb2b4c719cc4/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-23_00:14:54_57943630e24651e6d954b912e7fcdb2b4c719cc4/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_00:14:54_57943630e24651e6d954b912e7fcdb2b4c719cc4/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_00:14:54_57943630e24651e6d954b912e7fcdb2b4c719cc4/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-23_00:14:54_57943630e24651e6d954b912e7fcdb2b4c719cc4/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 2e51a03b955bcadc0f038362f3f3b7b87eeb6ec1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_00:14:54_57943630e24651e6d954b912e7fcdb2b4c719cc4/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-23_00:14:54_57943630e24651e6d954b912e7fcdb2b4c719cc4/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-23_00:14:54_57943630e24651e6d954b912e7fcdb2b4c719cc4/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-23_00:14:54_57943630e24651e6d954b912e7fcdb2b4c719cc4/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_00:14:54_57943630e24651e6d954b912e7fcdb2b4c719cc4/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-23_00:14:54_57943630e24651e6d954b912e7fcdb2b4c719cc4/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-23_00:14:54_57943630e24651e6d954b912e7fcdb2b4c719cc4/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_00:14:54_57943630e24651e6d954b912e7fcdb2b4c719cc4/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_00:14:54_57943630e24651e6d954b912e7fcdb2b4c719cc4/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-23_00:14:54_57943630e24651e6d954b912e7fcdb2b4c719cc4/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 21f764cded6a46acff32cc7572fa21e9658469e3..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_00:14:54_57943630e24651e6d954b912e7fcdb2b4c719cc4/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.57536,0.00316,316.0 diff --git a/raw_results/2023-08-23_00:14:54_57943630e24651e6d954b912e7fcdb2b4c719cc4/pytorch_bert_inference/0/main.log b/raw_results/2023-08-23_00:14:54_57943630e24651e6d954b912e7fcdb2b4c719cc4/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
ab8a199b45d04794a6971707564b84070a73284d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_00:14:54_57943630e24651e6d954b912e7fcdb2b4c719cc4/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-23 01:34:28,174][benchmark][INFO] - Configuring inference benchmark -[2023-08-23 01:34:28,175][benchmark][INFO] - + Setting seed(42) -[2023-08-23 01:34:29,445][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-23 01:34:29,445][backend][INFO] - Configuring pytorch backend -[2023-08-23 01:34:29,446][backend][INFO] - + Checking initial device isolation -[2023-08-23 01:34:29,446][backend][INFO] - + Checking contineous device isolation -[2023-08-23 01:34:29,446][pytorch][INFO] - + Disabling gradients -[2023-08-23 01:34:29,446][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-23 01:34:30,065][pytorch][INFO] - + Turning on eval mode -[2023-08-23 01:34:30,066][inference][INFO] - Running inference benchmark -[2023-08-23 01:34:30,195][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-23 01:34:30,197][inference][INFO] - + Tracking forward pass peak memory -[2023-08-23 01:34:30,256][inference][INFO] - + Forward pass peak memory: 466.57536 (MB) -[2023-08-23 01:34:30,257][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-23 01:34:30,259][inference][INFO] - + Warming up the forward pass -[2023-08-23 01:34:30,301][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-23 01:34:35,351][inference][INFO] - + Forward pass latency: 3.16e-03 (s) -[2023-08-23 01:34:35,353][inference][INFO] - + Forward pass throughput: 316.00 (samples/s) -[2023-08-23 01:34:35,353][inference][INFO] - Saving inference results -[2023-08-23 01:34:35,363][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-23_00:14:54_57943630e24651e6d954b912e7fcdb2b4c719cc4/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-23_00:14:54_57943630e24651e6d954b912e7fcdb2b4c719cc4/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_00:14:54_57943630e24651e6d954b912e7fcdb2b4c719cc4/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_00:14:54_57943630e24651e6d954b912e7fcdb2b4c719cc4/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-23_00:14:54_57943630e24651e6d954b912e7fcdb2b4c719cc4/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 6dad7de6a0f9a522f3ef66a07354696a7329e24f..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_00:14:54_57943630e24651e6d954b912e7fcdb2b4c719cc4/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
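Aside: with one `inference_results.csv` per sweep job per commit, comparing runs across commits means gathering the scattered single-row files into one table. A minimal sketch, assuming pandas is installed and the `raw_results/` layout shown throughout this diff:

```python
# Sketch: collect every inference_results.csv under raw_results/ into a
# single table keyed by commit directory, experiment name, and sweep job.
from pathlib import Path
import pandas as pd

frames = []
for csv_path in sorted(Path("raw_results").glob("*/*/*/inference_results.csv")):
    df = pd.read_csv(csv_path, index_col=0)
    # parts[-4:-1] = (<commit_date>_<commit_sha>, <experiment_name>, <job_num>)
    df["commit"], df["experiment"], df["job"] = csv_path.parts[-4:-1]
    frames.append(df)

results = pd.concat(frames, ignore_index=True)
print(results.filter(["commit", "experiment", "job", "forward.latency(s)"]))
```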
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-23_00:14:54_57943630e24651e6d954b912e7fcdb2b4c719cc4/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-23_00:14:54_57943630e24651e6d954b912e7fcdb2b4c719cc4/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-23_00:14:54_57943630e24651e6d954b912e7fcdb2b4c719cc4/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_00:14:54_57943630e24651e6d954b912e7fcdb2b4c719cc4/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-23_00:14:54_57943630e24651e6d954b912e7fcdb2b4c719cc4/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-23_00:14:54_57943630e24651e6d954b912e7fcdb2b4c719cc4/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_00:14:54_57943630e24651e6d954b912e7fcdb2b4c719cc4/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_00:14:54_57943630e24651e6d954b912e7fcdb2b4c719cc4/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-23_00:14:54_57943630e24651e6d954b912e7fcdb2b4c719cc4/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index fb6b5ac84c06c31f50f357e08ff3717420889136..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_00:14:54_57943630e24651e6d954b912e7fcdb2b4c719cc4/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.67718399999995,0.00356,1120.0 diff --git a/raw_results/2023-08-23_00:14:54_57943630e24651e6d954b912e7fcdb2b4c719cc4/pytorch_bert_inference/1/main.log b/raw_results/2023-08-23_00:14:54_57943630e24651e6d954b912e7fcdb2b4c719cc4/pytorch_bert_inference/1/main.log deleted file mode 100644 index 21b107bc8e0e001ffd31ed01a7e4fd6314dcd692..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_00:14:54_57943630e24651e6d954b912e7fcdb2b4c719cc4/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-23 01:34:35,739][benchmark][INFO] - Configuring inference benchmark -[2023-08-23 01:34:35,740][benchmark][INFO] - + Setting seed(42) -[2023-08-23 01:34:36,202][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-23 01:34:36,202][backend][INFO] - Configuring pytorch backend -[2023-08-23 01:34:36,202][backend][INFO] - + Checking initial device isolation -[2023-08-23 01:34:36,203][backend][INFO] - + Checking contineous device isolation -[2023-08-23 01:34:36,203][pytorch][INFO] - + Disabling gradients -[2023-08-23 01:34:36,203][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-23 01:34:36,315][pytorch][INFO] - + Turning on eval mode -[2023-08-23 01:34:36,316][inference][INFO] - Running inference benchmark -[2023-08-23 01:34:36,438][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-23 01:34:36,439][inference][INFO] - + Tracking forward 
pass peak memory -[2023-08-23 01:34:36,479][inference][INFO] - + Forward pass peak memory: 467.67718399999995 (MB) -[2023-08-23 01:34:36,480][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-23 01:34:36,482][inference][INFO] - + Warming up the forward pass -[2023-08-23 01:34:36,518][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-23 01:34:41,562][inference][INFO] - + Forward pass latency: 3.56e-03 (s) -[2023-08-23 01:34:41,563][inference][INFO] - + Forward pass throughput: 1120.00 (samples/s) -[2023-08-23 01:34:41,563][inference][INFO] - Saving inference results -[2023-08-23 01:34:41,571][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-23_00:14:54_57943630e24651e6d954b912e7fcdb2b4c719cc4/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-23_00:14:54_57943630e24651e6d954b912e7fcdb2b4c719cc4/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_00:14:54_57943630e24651e6d954b912e7fcdb2b4c719cc4/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_00:14:54_57943630e24651e6d954b912e7fcdb2b4c719cc4/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-23_00:14:54_57943630e24651e6d954b912e7fcdb2b4c719cc4/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index d7dfbd751b874fc181142f34e09ca612ce10dc33..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_00:14:54_57943630e24651e6d954b912e7fcdb2b4c719cc4/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - 
launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-23_00:14:54_57943630e24651e6d954b912e7fcdb2b4c719cc4/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-23_00:14:54_57943630e24651e6d954b912e7fcdb2b4c719cc4/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-23_00:14:54_57943630e24651e6d954b912e7fcdb2b4c719cc4/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_00:14:54_57943630e24651e6d954b912e7fcdb2b4c719cc4/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-23_00:14:54_57943630e24651e6d954b912e7fcdb2b4c719cc4/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-23_00:14:54_57943630e24651e6d954b912e7fcdb2b4c719cc4/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_00:14:54_57943630e24651e6d954b912e7fcdb2b4c719cc4/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_00:14:54_57943630e24651e6d954b912e7fcdb2b4c719cc4/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-23_00:14:54_57943630e24651e6d954b912e7fcdb2b4c719cc4/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index fdda02674fd0cdcadab039b5765ef1c3a73e1563..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_00:14:54_57943630e24651e6d954b912e7fcdb2b4c719cc4/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.061632,0.00318,314.0,0.494,202.0 diff --git a/raw_results/2023-08-23_00:14:54_57943630e24651e6d954b912e7fcdb2b4c719cc4/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-23_00:14:54_57943630e24651e6d954b912e7fcdb2b4c719cc4/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 5954bba57d1417bf3df0d0c7ff9fce150b2d2230..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-23_00:14:54_57943630e24651e6d954b912e7fcdb2b4c719cc4/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-23 01:34:46,513][benchmark][INFO] - Configuring inference benchmark -[2023-08-23 01:34:46,513][benchmark][INFO] - + Setting seed(42) -[2023-08-23 01:34:48,184][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-23 01:34:48,184][backend][INFO] - Configuring pytorch backend -[2023-08-23 01:34:48,184][backend][INFO] - + Checking initial device isolation -[2023-08-23 01:34:48,184][backend][INFO] - + Checking contineous device isolation -[2023-08-23 01:34:48,184][pytorch][INFO] - + Disabling gradients -[2023-08-23 01:34:48,185][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-23 01:34:48,832][pytorch][INFO] - + Turning on eval mode -[2023-08-23 01:34:48,832][inference][INFO] - Running inference benchmark -[2023-08-23 01:34:49,024][inference][INFO] - + Tracking forward pass peak memory -[2023-08-23 01:34:49,075][inference][INFO] - + Forward pass peak memory: 469.061632 (MB) -[2023-08-23 01:34:49,077][inference][INFO] - + Warming up the forward pass -[2023-08-23 01:34:49,114][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-23 01:34:54,163][inference][INFO] - + Forward pass latency: 3.18e-03 (s) -[2023-08-23 01:34:54,165][inference][INFO] - + Forward pass throughput: 314.00 (samples/s) -[2023-08-23 01:34:54,165][inference][INFO] - + Warming up the generation pass -[2023-08-23 01:34:54,665][inference][INFO] - + Tracking generation latency and throughput -[2023-08-23 01:35:00,102][inference][INFO] - + Generation pass latency: 4.94e-01 (s) -[2023-08-23 01:35:00,103][inference][INFO] - + Generation pass throughput: 202.00 (tokens/s) -[2023-08-23 01:35:00,103][inference][INFO] - Saving inference results -[2023-08-23 01:35:00,114][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-23_05:16:43_51794bf21ee6c9b9a702a3bceeea167e9518880b/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-23_05:16:43_51794bf21ee6c9b9a702a3bceeea167e9518880b/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_05:16:43_51794bf21ee6c9b9a702a3bceeea167e9518880b/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_05:16:43_51794bf21ee6c9b9a702a3bceeea167e9518880b/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-23_05:16:43_51794bf21ee6c9b9a702a3bceeea167e9518880b/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 6f35abe8006d0f02fc27a7f59c164534e2608c19..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_05:16:43_51794bf21ee6c9b9a702a3bceeea167e9518880b/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
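The sweeper params above (`benchmark.input_shapes.batch_size: 1,4`) are what expand this experiment into job directories 0 and 1. A minimal sketch of launching an equivalent Hydra multirun follows; the `main.py` entry point is an assumption (only the Hydra job name `main` appears in these configs), while `--multirun`, `--config-name`, and the comma-separated override are standard Hydra CLI usage. The `COMMIT_*` environment variables must be set because `hydra.sweep.dir` interpolates them via `${oc.env:...}`.

```python
import os
import subprocess

env = dict(os.environ)
# hydra.sweep.dir interpolates these via ${oc.env:...}, so they must be set.
env["COMMIT_DATE_GMT"] = "2023-08-23_05:16:43"
env["COMMIT_SHA"] = "51794bf21ee6c9b9a702a3bceeea167e9518880b"

subprocess.run(
    [
        "python", "main.py",                      # assumed entry point
        "--multirun",                             # hydra.mode=MULTIRUN
        "--config-name", "bert_cpu_inference",
        "benchmark.input_shapes.batch_size=1,4",  # BasicSweeper -> jobs 0 and 1
    ],
    env=env,
    check=True,
)
```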
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-23_05:16:43_51794bf21ee6c9b9a702a3bceeea167e9518880b/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-23_05:16:43_51794bf21ee6c9b9a702a3bceeea167e9518880b/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-23_05:16:43_51794bf21ee6c9b9a702a3bceeea167e9518880b/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_05:16:43_51794bf21ee6c9b9a702a3bceeea167e9518880b/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-23_05:16:43_51794bf21ee6c9b9a702a3bceeea167e9518880b/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-23_05:16:43_51794bf21ee6c9b9a702a3bceeea167e9518880b/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_05:16:43_51794bf21ee6c9b9a702a3bceeea167e9518880b/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_05:16:43_51794bf21ee6c9b9a702a3bceeea167e9518880b/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-23_05:16:43_51794bf21ee6c9b9a702a3bceeea167e9518880b/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 4ddc626961c9f93640aec598708ed7819846bf22..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_05:16:43_51794bf21ee6c9b9a702a3bceeea167e9518880b/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.06278399999997,0.00318,314.0 diff --git a/raw_results/2023-08-23_05:16:43_51794bf21ee6c9b9a702a3bceeea167e9518880b/pytorch_bert_inference/0/main.log b/raw_results/2023-08-23_05:16:43_51794bf21ee6c9b9a702a3bceeea167e9518880b/pytorch_bert_inference/0/main.log deleted file mode 100644 index 89f83a569a93f37eb9e72fd9610d4dc27de3b5d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_05:16:43_51794bf21ee6c9b9a702a3bceeea167e9518880b/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-23 06:49:54,649][benchmark][INFO] - Configuring inference benchmark -[2023-08-23 06:49:54,649][benchmark][INFO] - + Setting seed(42) -[2023-08-23 06:49:55,979][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-23 06:49:55,980][backend][INFO] - Configuring pytorch backend -[2023-08-23 06:49:55,980][backend][INFO] - + Checking initial device isolation -[2023-08-23 06:49:55,980][backend][INFO] - + Checking contineous device isolation -[2023-08-23 06:49:55,980][pytorch][INFO] - + Disabling gradients -[2023-08-23 06:49:55,980][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-23 06:49:56,629][pytorch][INFO] - + Turning on eval mode -[2023-08-23 06:49:56,630][inference][INFO] - Running inference benchmark -[2023-08-23 06:49:56,752][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-23 06:49:56,754][inference][INFO] - + Tracking forward 
pass peak memory -[2023-08-23 06:49:56,824][inference][INFO] - + Forward pass peak memory: 467.06278399999997 (MB) -[2023-08-23 06:49:56,826][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-23 06:49:56,827][inference][INFO] - + Warming up the forward pass -[2023-08-23 06:49:56,860][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-23 06:50:01,909][inference][INFO] - + Forward pass latency: 3.18e-03 (s) -[2023-08-23 06:50:01,911][inference][INFO] - + Forward pass throughput: 314.00 (samples/s) -[2023-08-23 06:50:01,911][inference][INFO] - Saving inference results -[2023-08-23 06:50:01,921][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-23_05:16:43_51794bf21ee6c9b9a702a3bceeea167e9518880b/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-23_05:16:43_51794bf21ee6c9b9a702a3bceeea167e9518880b/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_05:16:43_51794bf21ee6c9b9a702a3bceeea167e9518880b/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_05:16:43_51794bf21ee6c9b9a702a3bceeea167e9518880b/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-23_05:16:43_51794bf21ee6c9b9a702a3bceeea167e9518880b/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 802efd3251129fd7035e48a8e1e3621a65633aac..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_05:16:43_51794bf21ee6c9b9a702a3bceeea167e9518880b/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} 
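The run and sweep directories above are OmegaConf interpolations: `${oc.env:...}` reads environment variables and `${experiment_name}` references the experiment config. A small sketch of how they resolve, using the commit stamp from this run's own output paths (OmegaConf is the config library Hydra is built on, so `oc.env` is a built-in resolver):

```python
import os
from omegaconf import OmegaConf

os.environ["COMMIT_DATE_GMT"] = "2023-08-23_05:16:43"
os.environ["COMMIT_SHA"] = "51794bf21ee6c9b9a702a3bceeea167e9518880b"

cfg = OmegaConf.create({
    "experiment_name": "pytorch_bert_inference",
    "sweep_dir": "sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}",
})
print(cfg.sweep_dir)
# sweeps/2023-08-23_05:16:43_51794bf21ee6c9b9a702a3bceeea167e9518880b/pytorch_bert_inference
```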
- launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-23_05:16:43_51794bf21ee6c9b9a702a3bceeea167e9518880b/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-23_05:16:43_51794bf21ee6c9b9a702a3bceeea167e9518880b/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-23_05:16:43_51794bf21ee6c9b9a702a3bceeea167e9518880b/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_05:16:43_51794bf21ee6c9b9a702a3bceeea167e9518880b/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-23_05:16:43_51794bf21ee6c9b9a702a3bceeea167e9518880b/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-23_05:16:43_51794bf21ee6c9b9a702a3bceeea167e9518880b/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_05:16:43_51794bf21ee6c9b9a702a3bceeea167e9518880b/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_05:16:43_51794bf21ee6c9b9a702a3bceeea167e9518880b/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-23_05:16:43_51794bf21ee6c9b9a702a3bceeea167e9518880b/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index f0d309aa6d9bf34549bfd89d0dd7c40d8db0e3a7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_05:16:43_51794bf21ee6c9b9a702a3bceeea167e9518880b/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.160512,0.00355,1130.0 diff --git a/raw_results/2023-08-23_05:16:43_51794bf21ee6c9b9a702a3bceeea167e9518880b/pytorch_bert_inference/1/main.log b/raw_results/2023-08-23_05:16:43_51794bf21ee6c9b9a702a3bceeea167e9518880b/pytorch_bert_inference/1/main.log deleted file mode 100644 index 
b468a96e1aef66660e209e6a287e5fc2b241498c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_05:16:43_51794bf21ee6c9b9a702a3bceeea167e9518880b/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-23 06:50:02,289][benchmark][INFO] - Configuring inference benchmark -[2023-08-23 06:50:02,290][benchmark][INFO] - + Setting seed(42) -[2023-08-23 06:50:02,739][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-23 06:50:02,739][backend][INFO] - Configuring pytorch backend -[2023-08-23 06:50:02,739][backend][INFO] - + Checking initial device isolation -[2023-08-23 06:50:02,739][backend][INFO] - + Checking contineous device isolation -[2023-08-23 06:50:02,740][pytorch][INFO] - + Disabling gradients -[2023-08-23 06:50:02,740][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-23 06:50:02,870][pytorch][INFO] - + Turning on eval mode -[2023-08-23 06:50:02,871][inference][INFO] - Running inference benchmark -[2023-08-23 06:50:02,996][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-23 06:50:02,997][inference][INFO] - + Tracking forward pass peak memory -[2023-08-23 06:50:03,047][inference][INFO] - + Forward pass peak memory: 468.160512 (MB) -[2023-08-23 06:50:03,048][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-23 06:50:03,050][inference][INFO] - + Warming up the forward pass -[2023-08-23 06:50:03,087][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-23 06:50:08,132][inference][INFO] - + Forward pass latency: 3.55e-03 (s) -[2023-08-23 06:50:08,133][inference][INFO] - + Forward pass throughput: 1130.00 (samples/s) -[2023-08-23 06:50:08,134][inference][INFO] - Saving inference results -[2023-08-23 06:50:08,142][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-23_05:16:43_51794bf21ee6c9b9a702a3bceeea167e9518880b/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-23_05:16:43_51794bf21ee6c9b9a702a3bceeea167e9518880b/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_05:16:43_51794bf21ee6c9b9a702a3bceeea167e9518880b/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_05:16:43_51794bf21ee6c9b9a702a3bceeea167e9518880b/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-23_05:16:43_51794bf21ee6c9b9a702a3bceeea167e9518880b/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 2ab1fbbc617bb0c93a3325e0710c70b7d490b08a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_05:16:43_51794bf21ee6c9b9a702a3bceeea167e9518880b/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
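The throughput figures recorded in these runs' inference_results.csv files are consistent with simple derivations from the measured latencies and the configured input shapes: samples/s as batch_size over forward latency, and tokens/s as batch_size times new_tokens over generate latency, rounded to three significant figures. A quick consistency check (not the benchmark's actual code):

```python
from math import floor, log10

def sig3(x: float) -> float:
    """Round to 3 significant figures, matching the precision in the CSVs."""
    return round(x, -int(floor(log10(abs(x)))) + 2)

# bert, batch_size=1: forward latency 3.18e-03 s -> 314 samples/s
print(sig3(1 / 0.00318))      # 314.0
# bert, batch_size=4: forward latency 3.55e-03 s -> 1130 samples/s
print(sig3(4 / 0.00355))      # 1130.0
# gpt2, batch_size=1: forward latency 3.82e-03 s -> 262 samples/s
print(sig3(1 / 0.00382))      # 262.0
# gpt2, batch_size=1, new_tokens=100: generate latency 5.10e-01 s -> 196 tokens/s
print(sig3(1 * 100 / 0.51))   # 196.0
```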
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-23_05:16:43_51794bf21ee6c9b9a702a3bceeea167e9518880b/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-23_05:16:43_51794bf21ee6c9b9a702a3bceeea167e9518880b/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-23_05:16:43_51794bf21ee6c9b9a702a3bceeea167e9518880b/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_05:16:43_51794bf21ee6c9b9a702a3bceeea167e9518880b/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-23_05:16:43_51794bf21ee6c9b9a702a3bceeea167e9518880b/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-23_05:16:43_51794bf21ee6c9b9a702a3bceeea167e9518880b/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_05:16:43_51794bf21ee6c9b9a702a3bceeea167e9518880b/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_05:16:43_51794bf21ee6c9b9a702a3bceeea167e9518880b/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-23_05:16:43_51794bf21ee6c9b9a702a3bceeea167e9518880b/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index fd194db9bcbe415c489eeaa2abe10dbc4d6fd4b7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_05:16:43_51794bf21ee6c9b9a702a3bceeea167e9518880b/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.09439999999995,0.00382,262.0,0.51,196.0 diff --git a/raw_results/2023-08-23_05:16:43_51794bf21ee6c9b9a702a3bceeea167e9518880b/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-23_05:16:43_51794bf21ee6c9b9a702a3bceeea167e9518880b/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 5c1141e068c1413829730ccfc0bd38a856e594a3..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_05:16:43_51794bf21ee6c9b9a702a3bceeea167e9518880b/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-23 06:50:12,898][benchmark][INFO] - Configuring inference benchmark -[2023-08-23 06:50:12,900][benchmark][INFO] - + Setting seed(42) -[2023-08-23 06:50:14,475][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-23 06:50:14,475][backend][INFO] - Configuring pytorch backend -[2023-08-23 06:50:14,475][backend][INFO] - + Checking initial device isolation -[2023-08-23 06:50:14,475][backend][INFO] - + Checking contineous device isolation -[2023-08-23 06:50:14,475][pytorch][INFO] - + Disabling gradients -[2023-08-23 06:50:14,475][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-23 06:50:15,219][pytorch][INFO] - + Turning on eval mode -[2023-08-23 06:50:15,219][inference][INFO] - Running inference benchmark -[2023-08-23 06:50:15,578][inference][INFO] - + Tracking forward pass peak memory -[2023-08-23 06:50:15,629][inference][INFO] - + Forward pass peak memory: 469.09439999999995 (MB) -[2023-08-23 06:50:15,631][inference][INFO] - + Warming up the 
forward pass -[2023-08-23 06:50:15,664][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-23 06:50:20,709][inference][INFO] - + Forward pass latency: 3.82e-03 (s) -[2023-08-23 06:50:20,710][inference][INFO] - + Forward pass throughput: 262.00 (samples/s) -[2023-08-23 06:50:20,711][inference][INFO] - + Warming up the generation pass -[2023-08-23 06:50:21,209][inference][INFO] - + Tracking generation latency and throughput -[2023-08-23 06:50:26,307][inference][INFO] - + Generation pass latency: 5.10e-01 (s) -[2023-08-23 06:50:26,308][inference][INFO] - + Generation pass throughput: 196.00 (tokens/s) -[2023-08-23 06:50:26,308][inference][INFO] - Saving inference results -[2023-08-23 06:50:26,320][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-23_05:49:19_db587220844538787f560c8a797f1268fef9099d/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-23_05:49:19_db587220844538787f560c8a797f1268fef9099d/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_05:49:19_db587220844538787f560c8a797f1268fef9099d/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_05:49:19_db587220844538787f560c8a797f1268fef9099d/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-23_05:49:19_db587220844538787f560c8a797f1268fef9099d/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index db44c18876cceecf0bfd832236c1859c5cd343b4..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_05:49:19_db587220844538787f560c8a797f1268fef9099d/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-23_05:49:19_db587220844538787f560c8a797f1268fef9099d/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-23_05:49:19_db587220844538787f560c8a797f1268fef9099d/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-23_05:49:19_db587220844538787f560c8a797f1268fef9099d/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_05:49:19_db587220844538787f560c8a797f1268fef9099d/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-23_05:49:19_db587220844538787f560c8a797f1268fef9099d/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-23_05:49:19_db587220844538787f560c8a797f1268fef9099d/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_05:49:19_db587220844538787f560c8a797f1268fef9099d/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_05:49:19_db587220844538787f560c8a797f1268fef9099d/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-23_05:49:19_db587220844538787f560c8a797f1268fef9099d/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 3b123444dfe62ff3a0c8777e4b1767f6777aa0f2..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_05:49:19_db587220844538787f560c8a797f1268fef9099d/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.030016,0.00318,314.0 diff --git a/raw_results/2023-08-23_05:49:19_db587220844538787f560c8a797f1268fef9099d/pytorch_bert_inference/0/main.log b/raw_results/2023-08-23_05:49:19_db587220844538787f560c8a797f1268fef9099d/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
b63c7d211f8547defa56f3ead734d67f9037372c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_05:49:19_db587220844538787f560c8a797f1268fef9099d/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-23 06:51:32,035][benchmark][INFO] - Configuring inference benchmark -[2023-08-23 06:51:32,036][benchmark][INFO] - + Setting seed(42) -[2023-08-23 06:51:33,257][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-23 06:51:33,257][backend][INFO] - Configuring pytorch backend -[2023-08-23 06:51:33,257][backend][INFO] - + Checking initial device isolation -[2023-08-23 06:51:33,257][backend][INFO] - + Checking contineous device isolation -[2023-08-23 06:51:33,257][pytorch][INFO] - + Disabling gradients -[2023-08-23 06:51:33,258][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-23 06:51:33,877][pytorch][INFO] - + Turning on eval mode -[2023-08-23 06:51:33,877][inference][INFO] - Running inference benchmark -[2023-08-23 06:51:33,997][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-23 06:51:33,999][inference][INFO] - + Tracking forward pass peak memory -[2023-08-23 06:51:34,061][inference][INFO] - + Forward pass peak memory: 467.030016 (MB) -[2023-08-23 06:51:34,062][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-23 06:51:34,064][inference][INFO] - + Warming up the forward pass -[2023-08-23 06:51:34,096][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-23 06:51:39,148][inference][INFO] - + Forward pass latency: 3.18e-03 (s) -[2023-08-23 06:51:39,150][inference][INFO] - + Forward pass throughput: 314.00 (samples/s) -[2023-08-23 06:51:39,150][inference][INFO] - Saving inference results -[2023-08-23 06:51:39,162][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-23_05:49:19_db587220844538787f560c8a797f1268fef9099d/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-23_05:49:19_db587220844538787f560c8a797f1268fef9099d/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_05:49:19_db587220844538787f560c8a797f1268fef9099d/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_05:49:19_db587220844538787f560c8a797f1268fef9099d/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-23_05:49:19_db587220844538787f560c8a797f1268fef9099d/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index c71599bd9b4730ac9cbfbb9f58c180c085f33553..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_05:49:19_db587220844538787f560c8a797f1268fef9099d/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
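Note on the sweep layout shown in the hydra.yaml above: multirun mode expands the sweeper's benchmark.input_shapes.batch_size: 1,4 into jobs 0 and 1, and hydra.sweep.dir plus subdir: ${hydra.job.num} place each job under sweeps/<COMMIT_DATE_GMT>_<COMMIT_SHA>/<experiment_name>/<job_num>. A minimal Python sketch of that mapping, using the commit values visible in this diff (the helper is illustrative, not part of optimum-benchmark):

    from pathlib import Path

    # Mirrors the hydra.yaml above; COMMIT_* come from env vars in the real run.
    commit_date_gmt = "2023-08-23_05:49:19"
    commit_sha = "db587220844538787f560c8a797f1268fef9099d"
    experiment_name = "pytorch_bert_inference"
    batch_sizes = [1, 4]  # sweeper params: benchmark.input_shapes.batch_size: 1,4

    # hydra.sweep.dir / hydra.sweep.subdir => sweeps/<date>_<sha>/<experiment>/<job.num>
    for job_num, batch_size in enumerate(batch_sizes):
        out_dir = Path("sweeps") / f"{commit_date_gmt}_{commit_sha}" / experiment_name / str(job_num)
        print(job_num, batch_size, out_dir)

This is why job directory 0 holds the batch_size=1 results and job directory 1 the batch_size=4 results throughout the deleted files.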
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-23_05:49:19_db587220844538787f560c8a797f1268fef9099d/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-23_05:49:19_db587220844538787f560c8a797f1268fef9099d/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-23_05:49:19_db587220844538787f560c8a797f1268fef9099d/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_05:49:19_db587220844538787f560c8a797f1268fef9099d/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-23_05:49:19_db587220844538787f560c8a797f1268fef9099d/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-23_05:49:19_db587220844538787f560c8a797f1268fef9099d/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_05:49:19_db587220844538787f560c8a797f1268fef9099d/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_05:49:19_db587220844538787f560c8a797f1268fef9099d/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-23_05:49:19_db587220844538787f560c8a797f1268fef9099d/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 044c101bea44cc28b7853255f724917538aef065..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_05:49:19_db587220844538787f560c8a797f1268fef9099d/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.127744,0.00348,1150.0 diff --git a/raw_results/2023-08-23_05:49:19_db587220844538787f560c8a797f1268fef9099d/pytorch_bert_inference/1/main.log b/raw_results/2023-08-23_05:49:19_db587220844538787f560c8a797f1268fef9099d/pytorch_bert_inference/1/main.log deleted file mode 100644 index 9ad800bb939ac7bb00f52b0bd98b79d1f7785da6..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_05:49:19_db587220844538787f560c8a797f1268fef9099d/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-23 06:51:39,553][benchmark][INFO] - Configuring inference benchmark -[2023-08-23 06:51:39,554][benchmark][INFO] - + Setting seed(42) -[2023-08-23 06:51:39,998][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-23 06:51:39,998][backend][INFO] - Configuring pytorch backend -[2023-08-23 06:51:39,999][backend][INFO] - + Checking initial device isolation -[2023-08-23 06:51:39,999][backend][INFO] - + Checking contineous device isolation -[2023-08-23 06:51:39,999][pytorch][INFO] - + Disabling gradients -[2023-08-23 06:51:39,999][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-23 06:51:40,111][pytorch][INFO] - + Turning on eval mode -[2023-08-23 06:51:40,111][inference][INFO] - Running inference benchmark -[2023-08-23 06:51:40,233][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-23 06:51:40,235][inference][INFO] - + Tracking forward pass 
peak memory -[2023-08-23 06:51:40,279][inference][INFO] - + Forward pass peak memory: 468.127744 (MB) -[2023-08-23 06:51:40,280][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-23 06:51:40,282][inference][INFO] - + Warming up the forward pass -[2023-08-23 06:51:40,318][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-23 06:51:45,366][inference][INFO] - + Forward pass latency: 3.48e-03 (s) -[2023-08-23 06:51:45,368][inference][INFO] - + Forward pass throughput: 1150.00 (samples/s) -[2023-08-23 06:51:45,368][inference][INFO] - Saving inference results -[2023-08-23 06:51:45,376][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-23_05:49:19_db587220844538787f560c8a797f1268fef9099d/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-23_05:49:19_db587220844538787f560c8a797f1268fef9099d/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_05:49:19_db587220844538787f560c8a797f1268fef9099d/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_05:49:19_db587220844538787f560c8a797f1268fef9099d/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-23_05:49:19_db587220844538787f560c8a797f1268fef9099d/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index bd7d3b3622589eed189c582308fc42ab3bdc9b2d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_05:49:19_db587220844538787f560c8a797f1268fef9099d/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-23_05:49:19_db587220844538787f560c8a797f1268fef9099d/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-23_05:49:19_db587220844538787f560c8a797f1268fef9099d/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-23_05:49:19_db587220844538787f560c8a797f1268fef9099d/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_05:49:19_db587220844538787f560c8a797f1268fef9099d/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-23_05:49:19_db587220844538787f560c8a797f1268fef9099d/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-23_05:49:19_db587220844538787f560c8a797f1268fef9099d/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_05:49:19_db587220844538787f560c8a797f1268fef9099d/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_05:49:19_db587220844538787f560c8a797f1268fef9099d/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-23_05:49:19_db587220844538787f560c8a797f1268fef9099d/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 139e6d08cdbd7f96742a68cd45633b2845666417..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_05:49:19_db587220844538787f560c8a797f1268fef9099d/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.32377599999995,0.00381,262.0,0.509,196.0 diff --git a/raw_results/2023-08-23_05:49:19_db587220844538787f560c8a797f1268fef9099d/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-23_05:49:19_db587220844538787f560c8a797f1268fef9099d/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 2eaca5f58b1d1071e14f887afb1b50816f6173e2..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-23_05:49:19_db587220844538787f560c8a797f1268fef9099d/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-23 06:51:50,150][benchmark][INFO] - Configuring inference benchmark -[2023-08-23 06:51:50,151][benchmark][INFO] - + Setting seed(42) -[2023-08-23 06:51:51,566][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-23 06:51:51,567][backend][INFO] - Configuring pytorch backend -[2023-08-23 06:51:51,567][backend][INFO] - + Checking initial device isolation -[2023-08-23 06:51:51,567][backend][INFO] - + Checking contineous device isolation -[2023-08-23 06:51:51,567][pytorch][INFO] - + Disabling gradients -[2023-08-23 06:51:51,568][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-23 06:51:52,193][pytorch][INFO] - + Turning on eval mode -[2023-08-23 06:51:52,193][inference][INFO] - Running inference benchmark -[2023-08-23 06:51:52,397][inference][INFO] - + Tracking forward pass peak memory -[2023-08-23 06:51:52,448][inference][INFO] - + Forward pass peak memory: 469.32377599999995 (MB) -[2023-08-23 06:51:52,449][inference][INFO] - + Warming up the forward pass -[2023-08-23 06:51:52,482][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-23 06:51:57,525][inference][INFO] - + Forward pass latency: 3.81e-03 (s) -[2023-08-23 06:51:57,526][inference][INFO] - + Forward pass throughput: 262.00 (samples/s) -[2023-08-23 06:51:57,527][inference][INFO] - + Warming up the generation pass -[2023-08-23 06:51:58,114][inference][INFO] - + Tracking generation latency and throughput -[2023-08-23 06:52:03,208][inference][INFO] - + Generation pass latency: 5.09e-01 (s) -[2023-08-23 06:52:03,209][inference][INFO] - + Generation pass throughput: 196.00 (tokens/s) -[2023-08-23 06:52:03,209][inference][INFO] - Saving inference results -[2023-08-23 06:52:03,222][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-23_06:34:30_3d1edb6c5d36bf6426e72223f534266ff29c45c4/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-23_06:34:30_3d1edb6c5d36bf6426e72223f534266ff29c45c4/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_06:34:30_3d1edb6c5d36bf6426e72223f534266ff29c45c4/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_06:34:30_3d1edb6c5d36bf6426e72223f534266ff29c45c4/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-23_06:34:30_3d1edb6c5d36bf6426e72223f534266ff29c45c4/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 8ed42ef4b5e7ef93fe628f74599064aada780e11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_06:34:30_3d1edb6c5d36bf6426e72223f534266ff29c45c4/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
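Every commit deleted here repeats the same layout: raw_results/<date>_<sha>/<experiment>/<job_num>/ holding .config/*.yaml, hydra_config.yaml, inference_results.csv, and main.log. A stdlib-only sketch for collecting the per-commit CSV rows follows; the directory scheme is taken from the paths in this diff, while the aggregation itself is our illustration, not tooling from the repository:

    import csv
    from pathlib import Path

    rows = []
    # raw_results/<COMMIT_DATE_GMT>_<COMMIT_SHA>/<experiment>/<job_num>/inference_results.csv
    for csv_path in sorted(Path("raw_results").glob("*/pytorch_bert_inference/*/inference_results.csv")):
        commit_dir = csv_path.parts[1]   # e.g. 2023-08-23_05:49:19_db587220...
        job_num = csv_path.parts[3]      # sweep job number: '0' => batch 1, '1' => batch 4
        with csv_path.open() as f:
            for row in csv.DictReader(f):
                row.update(commit=commit_dir, job=job_num)
                rows.append(row)

    # rows now hold forward.peak_memory(MB), forward.latency(s) and
    # forward.throughput(samples/s) keyed by commit and sweep job.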
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-23_06:34:30_3d1edb6c5d36bf6426e72223f534266ff29c45c4/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-23_06:34:30_3d1edb6c5d36bf6426e72223f534266ff29c45c4/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-23_06:34:30_3d1edb6c5d36bf6426e72223f534266ff29c45c4/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_06:34:30_3d1edb6c5d36bf6426e72223f534266ff29c45c4/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-23_06:34:30_3d1edb6c5d36bf6426e72223f534266ff29c45c4/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-23_06:34:30_3d1edb6c5d36bf6426e72223f534266ff29c45c4/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_06:34:30_3d1edb6c5d36bf6426e72223f534266ff29c45c4/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_06:34:30_3d1edb6c5d36bf6426e72223f534266ff29c45c4/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-23_06:34:30_3d1edb6c5d36bf6426e72223f534266ff29c45c4/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index e3c29afe19712ad60ea1800ed75eeada6ac2469a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_06:34:30_3d1edb6c5d36bf6426e72223f534266ff29c45c4/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.54668799999996,0.00379,264.0 diff --git a/raw_results/2023-08-23_06:34:30_3d1edb6c5d36bf6426e72223f534266ff29c45c4/pytorch_bert_inference/0/main.log b/raw_results/2023-08-23_06:34:30_3d1edb6c5d36bf6426e72223f534266ff29c45c4/pytorch_bert_inference/0/main.log deleted file mode 100644 index a5905b11a1684681986b47b57919f71f8512f381..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_06:34:30_3d1edb6c5d36bf6426e72223f534266ff29c45c4/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-23 06:53:08,036][benchmark][INFO] - Configuring inference benchmark -[2023-08-23 06:53:08,038][benchmark][INFO] - + Setting seed(42) -[2023-08-23 06:53:09,268][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-23 06:53:09,269][backend][INFO] - Configuring pytorch backend -[2023-08-23 06:53:09,269][backend][INFO] - + Checking initial device isolation -[2023-08-23 06:53:09,269][backend][INFO] - + Checking contineous device isolation -[2023-08-23 06:53:09,269][pytorch][INFO] - + Disabling gradients -[2023-08-23 06:53:09,269][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-23 06:53:09,909][pytorch][INFO] - + Turning on eval mode -[2023-08-23 06:53:09,909][inference][INFO] - Running inference benchmark -[2023-08-23 06:53:10,029][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-23 06:53:10,031][inference][INFO] - + Tracking forward 
pass peak memory -[2023-08-23 06:53:10,094][inference][INFO] - + Forward pass peak memory: 466.54668799999996 (MB) -[2023-08-23 06:53:10,095][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-23 06:53:10,097][inference][INFO] - + Warming up the forward pass -[2023-08-23 06:53:10,140][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-23 06:53:15,186][inference][INFO] - + Forward pass latency: 3.79e-03 (s) -[2023-08-23 06:53:15,188][inference][INFO] - + Forward pass throughput: 264.00 (samples/s) -[2023-08-23 06:53:15,188][inference][INFO] - Saving inference results -[2023-08-23 06:53:15,198][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-23_06:34:30_3d1edb6c5d36bf6426e72223f534266ff29c45c4/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-23_06:34:30_3d1edb6c5d36bf6426e72223f534266ff29c45c4/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_06:34:30_3d1edb6c5d36bf6426e72223f534266ff29c45c4/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_06:34:30_3d1edb6c5d36bf6426e72223f534266ff29c45c4/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-23_06:34:30_3d1edb6c5d36bf6426e72223f534266ff29c45c4/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index ba3863c3560907bc171948457152f97ab4f4365e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_06:34:30_3d1edb6c5d36bf6426e72223f534266ff29c45c4/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} 
- launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-23_06:34:30_3d1edb6c5d36bf6426e72223f534266ff29c45c4/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-23_06:34:30_3d1edb6c5d36bf6426e72223f534266ff29c45c4/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-23_06:34:30_3d1edb6c5d36bf6426e72223f534266ff29c45c4/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_06:34:30_3d1edb6c5d36bf6426e72223f534266ff29c45c4/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-23_06:34:30_3d1edb6c5d36bf6426e72223f534266ff29c45c4/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-23_06:34:30_3d1edb6c5d36bf6426e72223f534266ff29c45c4/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_06:34:30_3d1edb6c5d36bf6426e72223f534266ff29c45c4/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_06:34:30_3d1edb6c5d36bf6426e72223f534266ff29c45c4/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-23_06:34:30_3d1edb6c5d36bf6426e72223f534266ff29c45c4/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 9ce09cc0d5c516420c2b0e312949914c6ddeca14..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_06:34:30_3d1edb6c5d36bf6426e72223f534266ff29c45c4/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.57068799999996,0.00429,932.0 diff --git a/raw_results/2023-08-23_06:34:30_3d1edb6c5d36bf6426e72223f534266ff29c45c4/pytorch_bert_inference/1/main.log b/raw_results/2023-08-23_06:34:30_3d1edb6c5d36bf6426e72223f534266ff29c45c4/pytorch_bert_inference/1/main.log deleted file mode 100644 index 
831d599a6d3bdd05412f5bac441d03db1b929057..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_06:34:30_3d1edb6c5d36bf6426e72223f534266ff29c45c4/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-23 06:53:15,578][benchmark][INFO] - Configuring inference benchmark -[2023-08-23 06:53:15,579][benchmark][INFO] - + Setting seed(42) -[2023-08-23 06:53:16,044][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-23 06:53:16,044][backend][INFO] - Configuring pytorch backend -[2023-08-23 06:53:16,045][backend][INFO] - + Checking initial device isolation -[2023-08-23 06:53:16,045][backend][INFO] - + Checking contineous device isolation -[2023-08-23 06:53:16,045][pytorch][INFO] - + Disabling gradients -[2023-08-23 06:53:16,045][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-23 06:53:16,171][pytorch][INFO] - + Turning on eval mode -[2023-08-23 06:53:16,172][inference][INFO] - Running inference benchmark -[2023-08-23 06:53:16,318][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-23 06:53:16,319][inference][INFO] - + Tracking forward pass peak memory -[2023-08-23 06:53:16,364][inference][INFO] - + Forward pass peak memory: 467.57068799999996 (MB) -[2023-08-23 06:53:16,365][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-23 06:53:16,367][inference][INFO] - + Warming up the forward pass -[2023-08-23 06:53:16,411][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-23 06:53:21,449][inference][INFO] - + Forward pass latency: 4.29e-03 (s) -[2023-08-23 06:53:21,450][inference][INFO] - + Forward pass throughput: 932.00 (samples/s) -[2023-08-23 06:53:21,450][inference][INFO] - Saving inference results -[2023-08-23 06:53:21,457][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-23_06:34:30_3d1edb6c5d36bf6426e72223f534266ff29c45c4/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-23_06:34:30_3d1edb6c5d36bf6426e72223f534266ff29c45c4/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_06:34:30_3d1edb6c5d36bf6426e72223f534266ff29c45c4/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_06:34:30_3d1edb6c5d36bf6426e72223f534266ff29c45c4/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-23_06:34:30_3d1edb6c5d36bf6426e72223f534266ff29c45c4/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 5ecfe99da6a51f127f31a704b2cfa73d82beadca..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_06:34:30_3d1edb6c5d36bf6426e72223f534266ff29c45c4/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
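Note the difference between the two config snapshots above: .config/config.yaml stores disable_grad and eval_mode as the unresolved interpolation ${is_inference:${benchmark.name}}, while hydra_config.yaml, written after resolution, records them as true. That behaviour matches a custom OmegaConf resolver keyed on the benchmark name; a minimal reproduction is below, where the resolver body is our assumption rather than the library's actual source:

    from omegaconf import OmegaConf

    # Hypothetical stand-in for the resolver the benchmark tool registers.
    OmegaConf.register_new_resolver("is_inference", lambda name: name == "inference")

    cfg = OmegaConf.create(
        {
            "benchmark": {"name": "inference"},
            "backend": {"disable_grad": "${is_inference:${benchmark.name}}"},
        }
    )
    print(cfg.backend.disable_grad)  # True once the interpolation resolves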
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-23_06:34:30_3d1edb6c5d36bf6426e72223f534266ff29c45c4/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-23_06:34:30_3d1edb6c5d36bf6426e72223f534266ff29c45c4/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-23_06:34:30_3d1edb6c5d36bf6426e72223f534266ff29c45c4/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_06:34:30_3d1edb6c5d36bf6426e72223f534266ff29c45c4/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-23_06:34:30_3d1edb6c5d36bf6426e72223f534266ff29c45c4/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-23_06:34:30_3d1edb6c5d36bf6426e72223f534266ff29c45c4/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_06:34:30_3d1edb6c5d36bf6426e72223f534266ff29c45c4/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_06:34:30_3d1edb6c5d36bf6426e72223f534266ff29c45c4/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-23_06:34:30_3d1edb6c5d36bf6426e72223f534266ff29c45c4/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 82c2f56f58476653a6ee1c5423313ebd04693aa6..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_06:34:30_3d1edb6c5d36bf6426e72223f534266ff29c45c4/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.03296,0.00343,292.0,0.484,207.0 diff --git a/raw_results/2023-08-23_06:34:30_3d1edb6c5d36bf6426e72223f534266ff29c45c4/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-23_06:34:30_3d1edb6c5d36bf6426e72223f534266ff29c45c4/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index d314bc8d9fed3de04de03cf3e6d7f16d3344022c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_06:34:30_3d1edb6c5d36bf6426e72223f534266ff29c45c4/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-23 06:53:26,187][benchmark][INFO] - Configuring inference benchmark -[2023-08-23 06:53:26,188][benchmark][INFO] - + Setting seed(42) -[2023-08-23 06:53:27,621][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-23 06:53:27,622][backend][INFO] - Configuring pytorch backend -[2023-08-23 06:53:27,622][backend][INFO] - + Checking initial device isolation -[2023-08-23 06:53:27,622][backend][INFO] - + Checking contineous device isolation -[2023-08-23 06:53:27,622][pytorch][INFO] - + Disabling gradients -[2023-08-23 06:53:27,623][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-23 06:53:28,272][pytorch][INFO] - + Turning on eval mode -[2023-08-23 06:53:28,273][inference][INFO] - Running inference benchmark -[2023-08-23 06:53:28,467][inference][INFO] - + Tracking forward pass peak memory -[2023-08-23 06:53:28,515][inference][INFO] - + Forward pass peak memory: 469.03296 (MB) -[2023-08-23 06:53:28,517][inference][INFO] - + Warming up the forward pass 
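The gpt2 CSV row above is internally consistent in two ways. Throughput matches batch_size / forward latency (1 / 0.00343 s ≈ 292 samples/s) and new_tokens / generation latency (100 / 0.484 s ≈ 207 tokens/s), each rounded to three significant figures, and the peak-memory value converts back to an exact byte count, suggesting the (MB) column is decimal megabytes. A quick check, with helpers that are ours rather than optimum-benchmark code:

    from math import floor, log10

    def round_sig(x: float, sig: int = 3) -> float:
        # Round to `sig` significant figures, matching how the results appear to be reported.
        return round(x, sig - 1 - floor(log10(abs(x))))

    batch_size, new_tokens = 1, 100                                    # from the config above
    fwd_latency, gen_latency, peak_mb = 3.43e-03, 4.84e-01, 469.03296  # CSV row above

    print(round_sig(batch_size / fwd_latency))   # 292.0 samples/s
    print(round_sig(new_tokens / gen_latency))   # 207.0 tokens/s
    print(round(peak_mb * 1e6))                  # 469032960 -> an exact byte count

The same relations hold for the bert rows (e.g. 1 / 0.00318 s ≈ 314 samples/s, 4 / 0.00348 s ≈ 1150 samples/s), so the long decimal tails on some memory values are most likely float artifacts of a bytes-to-MB conversion.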
-[2023-08-23 06:53:28,549][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-23 06:53:33,594][inference][INFO] - + Forward pass latency: 3.43e-03 (s) -[2023-08-23 06:53:33,596][inference][INFO] - + Forward pass throughput: 292.00 (samples/s) -[2023-08-23 06:53:33,597][inference][INFO] - + Warming up the generation pass -[2023-08-23 06:53:34,088][inference][INFO] - + Tracking generation latency and throughput -[2023-08-23 06:53:39,408][inference][INFO] - + Generation pass latency: 4.84e-01 (s) -[2023-08-23 06:53:39,409][inference][INFO] - + Generation pass throughput: 207.00 (tokens/s) -[2023-08-23 06:53:39,409][inference][INFO] - Saving inference results -[2023-08-23 06:53:39,421][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-23_09:14:45_b413e0610b42d4c8d9c7a69c06440ad27c69808b/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-23_09:14:45_b413e0610b42d4c8d9c7a69c06440ad27c69808b/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_09:14:45_b413e0610b42d4c8d9c7a69c06440ad27c69808b/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_09:14:45_b413e0610b42d4c8d9c7a69c06440ad27c69808b/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-23_09:14:45_b413e0610b42d4c8d9c7a69c06440ad27c69808b/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index b63560e3aeb9cbab689070b33621987b9de3e863..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_09:14:45_b413e0610b42d4c8d9c7a69c06440ad27c69808b/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-23_09:14:45_b413e0610b42d4c8d9c7a69c06440ad27c69808b/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-23_09:14:45_b413e0610b42d4c8d9c7a69c06440ad27c69808b/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-23_09:14:45_b413e0610b42d4c8d9c7a69c06440ad27c69808b/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_09:14:45_b413e0610b42d4c8d9c7a69c06440ad27c69808b/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-23_09:14:45_b413e0610b42d4c8d9c7a69c06440ad27c69808b/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-23_09:14:45_b413e0610b42d4c8d9c7a69c06440ad27c69808b/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_09:14:45_b413e0610b42d4c8d9c7a69c06440ad27c69808b/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_09:14:45_b413e0610b42d4c8d9c7a69c06440ad27c69808b/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-23_09:14:45_b413e0610b42d4c8d9c7a69c06440ad27c69808b/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index c8982fce9b281275149a43fb55ef4961b7c3598b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_09:14:45_b413e0610b42d4c8d9c7a69c06440ad27c69808b/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.54668799999996,0.00411,243.0 diff --git a/raw_results/2023-08-23_09:14:45_b413e0610b42d4c8d9c7a69c06440ad27c69808b/pytorch_bert_inference/0/main.log b/raw_results/2023-08-23_09:14:45_b413e0610b42d4c8d9c7a69c06440ad27c69808b/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
433d9d5b1f9fc80c8c696986e538f1b50ea45ace..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_09:14:45_b413e0610b42d4c8d9c7a69c06440ad27c69808b/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-23 10:50:15,039][benchmark][INFO] - Configuring inference benchmark -[2023-08-23 10:50:15,040][benchmark][INFO] - + Setting seed(42) -[2023-08-23 10:50:16,289][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-23 10:50:16,290][backend][INFO] - Configuring pytorch backend -[2023-08-23 10:50:16,290][backend][INFO] - + Checking initial device isolation -[2023-08-23 10:50:16,290][backend][INFO] - + Checking contineous device isolation -[2023-08-23 10:50:16,290][pytorch][INFO] - + Disabling gradients -[2023-08-23 10:50:16,291][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-23 10:50:16,942][pytorch][INFO] - + Turning on eval mode -[2023-08-23 10:50:16,942][inference][INFO] - Running inference benchmark -[2023-08-23 10:50:17,063][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-23 10:50:17,065][inference][INFO] - + Tracking forward pass peak memory -[2023-08-23 10:50:17,129][inference][INFO] - + Forward pass peak memory: 466.54668799999996 (MB) -[2023-08-23 10:50:17,130][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-23 10:50:17,132][inference][INFO] - + Warming up the forward pass -[2023-08-23 10:50:17,167][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-23 10:50:22,214][inference][INFO] - + Forward pass latency: 4.11e-03 (s) -[2023-08-23 10:50:22,215][inference][INFO] - + Forward pass throughput: 243.00 (samples/s) -[2023-08-23 10:50:22,215][inference][INFO] - Saving inference results -[2023-08-23 10:50:22,227][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-23_09:14:45_b413e0610b42d4c8d9c7a69c06440ad27c69808b/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-23_09:14:45_b413e0610b42d4c8d9c7a69c06440ad27c69808b/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_09:14:45_b413e0610b42d4c8d9c7a69c06440ad27c69808b/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_09:14:45_b413e0610b42d4c8d9c7a69c06440ad27c69808b/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-23_09:14:45_b413e0610b42d4c8d9c7a69c06440ad27c69808b/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index dec44ed9419009f1551c69f03f2d93d18c92a45f..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_09:14:45_b413e0610b42d4c8d9c7a69c06440ad27c69808b/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
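[Editor's note] The hydra.yaml files in this sweep show why each bert experiment directory contains subruns 0 and 1: hydra.mode is MULTIRUN, and the basic sweeper expands benchmark.input_shapes.batch_size: 1,4 into one job per value, each writing its own overrides.yaml under sweeps/${COMMIT_DATE_GMT}_${COMMIT_SHA}/${experiment_name}/${hydra.job.num}. A simplified sketch of that expansion (Hydra's BasicSweeper actually takes the cartesian product over all swept keys; a single key is shown because that is all these configs sweep):

# How a comma-separated sweep value becomes per-job override lists.
sweep = {"benchmark.input_shapes.batch_size": "1,4"}  # from hydra.sweeper.params above

jobs = []
for key, values in sweep.items():
    for num, value in enumerate(values.split(",")):
        jobs.append((num, [f"{key}={value}"]))

for num, overrides in jobs:
    # Matches the deleted overrides.yaml files: job 0 -> batch_size=1, job 1 -> batch_size=4
    print(num, overrides)

Consistent with this, the deleted bert CSVs show forward throughput scaling roughly linearly with batch size: 0.00411 s latency and 243 samples/s at batch size 1, versus 0.00356 s and 1120 ≈ 4 / 0.00356 samples/s at batch size 4.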
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-23_09:14:45_b413e0610b42d4c8d9c7a69c06440ad27c69808b/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-23_09:14:45_b413e0610b42d4c8d9c7a69c06440ad27c69808b/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-23_09:14:45_b413e0610b42d4c8d9c7a69c06440ad27c69808b/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_09:14:45_b413e0610b42d4c8d9c7a69c06440ad27c69808b/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-23_09:14:45_b413e0610b42d4c8d9c7a69c06440ad27c69808b/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-23_09:14:45_b413e0610b42d4c8d9c7a69c06440ad27c69808b/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_09:14:45_b413e0610b42d4c8d9c7a69c06440ad27c69808b/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_09:14:45_b413e0610b42d4c8d9c7a69c06440ad27c69808b/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-23_09:14:45_b413e0610b42d4c8d9c7a69c06440ad27c69808b/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index a5cea01db0608e440ff1c95526966395c92d4ba6..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_09:14:45_b413e0610b42d4c8d9c7a69c06440ad27c69808b/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.56249599999995,0.00356,1120.0 diff --git a/raw_results/2023-08-23_09:14:45_b413e0610b42d4c8d9c7a69c06440ad27c69808b/pytorch_bert_inference/1/main.log b/raw_results/2023-08-23_09:14:45_b413e0610b42d4c8d9c7a69c06440ad27c69808b/pytorch_bert_inference/1/main.log deleted file mode 100644 index 81ecc7c65bec19b692018747b01368d0d040f42c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_09:14:45_b413e0610b42d4c8d9c7a69c06440ad27c69808b/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-23 10:50:22,632][benchmark][INFO] - Configuring inference benchmark -[2023-08-23 10:50:22,634][benchmark][INFO] - + Setting seed(42) -[2023-08-23 10:50:23,080][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-23 10:50:23,080][backend][INFO] - Configuring pytorch backend -[2023-08-23 10:50:23,080][backend][INFO] - + Checking initial device isolation -[2023-08-23 10:50:23,080][backend][INFO] - + Checking contineous device isolation -[2023-08-23 10:50:23,081][pytorch][INFO] - + Disabling gradients -[2023-08-23 10:50:23,081][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-23 10:50:23,199][pytorch][INFO] - + Turning on eval mode -[2023-08-23 10:50:23,200][inference][INFO] - Running inference benchmark -[2023-08-23 10:50:23,336][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-23 10:50:23,338][inference][INFO] - + Tracking forward 
pass peak memory -[2023-08-23 10:50:23,382][inference][INFO] - + Forward pass peak memory: 467.56249599999995 (MB) -[2023-08-23 10:50:23,383][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-23 10:50:23,385][inference][INFO] - + Warming up the forward pass -[2023-08-23 10:50:23,428][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-23 10:50:28,475][inference][INFO] - + Forward pass latency: 3.56e-03 (s) -[2023-08-23 10:50:28,477][inference][INFO] - + Forward pass throughput: 1120.00 (samples/s) -[2023-08-23 10:50:28,477][inference][INFO] - Saving inference results -[2023-08-23 10:50:28,486][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-23_09:14:45_b413e0610b42d4c8d9c7a69c06440ad27c69808b/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-23_09:14:45_b413e0610b42d4c8d9c7a69c06440ad27c69808b/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_09:14:45_b413e0610b42d4c8d9c7a69c06440ad27c69808b/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_09:14:45_b413e0610b42d4c8d9c7a69c06440ad27c69808b/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-23_09:14:45_b413e0610b42d4c8d9c7a69c06440ad27c69808b/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 6a647b29795c090ac6181fdb39633fc50a21cfea..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_09:14:45_b413e0610b42d4c8d9c7a69c06440ad27c69808b/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - 
launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-23_09:14:45_b413e0610b42d4c8d9c7a69c06440ad27c69808b/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-23_09:14:45_b413e0610b42d4c8d9c7a69c06440ad27c69808b/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-23_09:14:45_b413e0610b42d4c8d9c7a69c06440ad27c69808b/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_09:14:45_b413e0610b42d4c8d9c7a69c06440ad27c69808b/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-23_09:14:45_b413e0610b42d4c8d9c7a69c06440ad27c69808b/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-23_09:14:45_b413e0610b42d4c8d9c7a69c06440ad27c69808b/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_09:14:45_b413e0610b42d4c8d9c7a69c06440ad27c69808b/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_09:14:45_b413e0610b42d4c8d9c7a69c06440ad27c69808b/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-23_09:14:45_b413e0610b42d4c8d9c7a69c06440ad27c69808b/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 3fb87e419a76ab5a2a90ccc06383af58ac238dd2..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_09:14:45_b413e0610b42d4c8d9c7a69c06440ad27c69808b/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.10259199999996,0.00306,327.0,0.488,205.0 diff --git a/raw_results/2023-08-23_09:14:45_b413e0610b42d4c8d9c7a69c06440ad27c69808b/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-23_09:14:45_b413e0610b42d4c8d9c7a69c06440ad27c69808b/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 07a5175030834c54f181fd942e575b545fed4e60..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-23_09:14:45_b413e0610b42d4c8d9c7a69c06440ad27c69808b/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-23 10:50:33,373][benchmark][INFO] - Configuring inference benchmark -[2023-08-23 10:50:33,374][benchmark][INFO] - + Setting seed(42) -[2023-08-23 10:50:34,760][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-23 10:50:34,760][backend][INFO] - Configuring pytorch backend -[2023-08-23 10:50:34,761][backend][INFO] - + Checking initial device isolation -[2023-08-23 10:50:34,761][backend][INFO] - + Checking contineous device isolation -[2023-08-23 10:50:34,761][pytorch][INFO] - + Disabling gradients -[2023-08-23 10:50:34,761][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-23 10:50:35,401][pytorch][INFO] - + Turning on eval mode -[2023-08-23 10:50:35,402][inference][INFO] - Running inference benchmark -[2023-08-23 10:50:35,594][inference][INFO] - + Tracking forward pass peak memory -[2023-08-23 10:50:35,643][inference][INFO] - + Forward pass peak memory: 469.10259199999996 (MB) -[2023-08-23 10:50:35,645][inference][INFO] - + Warming up the forward pass -[2023-08-23 10:50:35,676][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-23 10:50:40,734][inference][INFO] - + Forward pass latency: 3.06e-03 (s) -[2023-08-23 10:50:40,736][inference][INFO] - + Forward pass throughput: 327.00 (samples/s) -[2023-08-23 10:50:40,737][inference][INFO] - + Warming up the generation pass -[2023-08-23 10:50:41,229][inference][INFO] - + Tracking generation latency and throughput -[2023-08-23 10:50:46,594][inference][INFO] - + Generation pass latency: 4.88e-01 (s) -[2023-08-23 10:50:46,595][inference][INFO] - + Generation pass throughput: 205.00 (tokens/s) -[2023-08-23 10:50:46,596][inference][INFO] - Saving inference results -[2023-08-23 10:50:46,612][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-23_12:07:46_2cf87e2bbb46ff7ec9dd2746d694d41b6815fb43/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-23_12:07:46_2cf87e2bbb46ff7ec9dd2746d694d41b6815fb43/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_12:07:46_2cf87e2bbb46ff7ec9dd2746d694d41b6815fb43/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_12:07:46_2cf87e2bbb46ff7ec9dd2746d694d41b6815fb43/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-23_12:07:46_2cf87e2bbb46ff7ec9dd2746d694d41b6815fb43/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index d39179c941970ab435d54bad07cbccf4d6f0a074..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_12:07:46_2cf87e2bbb46ff7ec9dd2746d694d41b6815fb43/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
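[Editor's note] Throughout this diff, each run stores two copies of its configuration: .config/config.yaml keeps the unresolved OmegaConf interpolations (disable_grad: ${is_inference:${benchmark.name}}), while hydra_config.yaml keeps the resolved values (disable_grad: true, since benchmark.name is inference). A minimal illustration of that resolution with omegaconf (the resolver body is an assumption about optimum-benchmark's is_inference helper; register_new_resolver and resolve are standard omegaconf APIs):

from omegaconf import OmegaConf

# Assumed behaviour of the is_inference resolver referenced by these configs.
OmegaConf.register_new_resolver("is_inference", lambda name: name == "inference")

cfg = OmegaConf.create(
    {
        "benchmark": {"name": "inference"},
        "backend": {"disable_grad": "${is_inference:${benchmark.name}}"},
    }
)
OmegaConf.resolve(cfg)  # replaces interpolations with concrete values, in place
print(cfg.backend.disable_grad)  # True, matching the hydra_config.yaml files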
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-23_12:07:46_2cf87e2bbb46ff7ec9dd2746d694d41b6815fb43/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-23_12:07:46_2cf87e2bbb46ff7ec9dd2746d694d41b6815fb43/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-23_12:07:46_2cf87e2bbb46ff7ec9dd2746d694d41b6815fb43/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_12:07:46_2cf87e2bbb46ff7ec9dd2746d694d41b6815fb43/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-23_12:07:46_2cf87e2bbb46ff7ec9dd2746d694d41b6815fb43/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-23_12:07:46_2cf87e2bbb46ff7ec9dd2746d694d41b6815fb43/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_12:07:46_2cf87e2bbb46ff7ec9dd2746d694d41b6815fb43/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_12:07:46_2cf87e2bbb46ff7ec9dd2746d694d41b6815fb43/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-23_12:07:46_2cf87e2bbb46ff7ec9dd2746d694d41b6815fb43/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index b29aa592033dabb46c052cb32f147c52482dece6..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_12:07:46_2cf87e2bbb46ff7ec9dd2746d694d41b6815fb43/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.6368,0.00308,325.0 diff --git a/raw_results/2023-08-23_12:07:46_2cf87e2bbb46ff7ec9dd2746d694d41b6815fb43/pytorch_bert_inference/0/main.log b/raw_results/2023-08-23_12:07:46_2cf87e2bbb46ff7ec9dd2746d694d41b6815fb43/pytorch_bert_inference/0/main.log deleted file mode 100644 index b2819d25b77630593e2bbd5d05a268b19fd5b81c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_12:07:46_2cf87e2bbb46ff7ec9dd2746d694d41b6815fb43/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-23 12:58:20,742][benchmark][INFO] - Configuring inference benchmark -[2023-08-23 12:58:20,743][benchmark][INFO] - + Setting seed(42) -[2023-08-23 12:58:21,985][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-23 12:58:21,986][backend][INFO] - Configuring pytorch backend -[2023-08-23 12:58:21,986][backend][INFO] - + Checking initial device isolation -[2023-08-23 12:58:21,986][backend][INFO] - + Checking contineous device isolation -[2023-08-23 12:58:21,986][pytorch][INFO] - + Disabling gradients -[2023-08-23 12:58:21,987][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-23 12:58:22,621][pytorch][INFO] - + Turning on eval mode -[2023-08-23 12:58:22,622][inference][INFO] - Running inference benchmark -[2023-08-23 12:58:22,737][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-23 12:58:22,738][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-23 12:58:22,798][inference][INFO] - + Forward pass peak memory: 466.6368 (MB) -[2023-08-23 12:58:22,799][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-23 12:58:22,801][inference][INFO] - + Warming up the forward pass -[2023-08-23 12:58:22,837][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-23 12:58:27,892][inference][INFO] - + Forward pass latency: 3.08e-03 (s) -[2023-08-23 12:58:27,893][inference][INFO] - + Forward pass throughput: 325.00 (samples/s) -[2023-08-23 12:58:27,894][inference][INFO] - Saving inference results -[2023-08-23 12:58:27,905][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-23_12:07:46_2cf87e2bbb46ff7ec9dd2746d694d41b6815fb43/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-23_12:07:46_2cf87e2bbb46ff7ec9dd2746d694d41b6815fb43/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_12:07:46_2cf87e2bbb46ff7ec9dd2746d694d41b6815fb43/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_12:07:46_2cf87e2bbb46ff7ec9dd2746d694d41b6815fb43/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-23_12:07:46_2cf87e2bbb46ff7ec9dd2746d694d41b6815fb43/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 87ddb3cf8480794edfc94eba8bc19fa34b0c8d59..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_12:07:46_2cf87e2bbb46ff7ec9dd2746d694d41b6815fb43/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-23_12:07:46_2cf87e2bbb46ff7ec9dd2746d694d41b6815fb43/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-23_12:07:46_2cf87e2bbb46ff7ec9dd2746d694d41b6815fb43/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-08-23_12:07:46_2cf87e2bbb46ff7ec9dd2746d694d41b6815fb43/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_12:07:46_2cf87e2bbb46ff7ec9dd2746d694d41b6815fb43/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-23_12:07:46_2cf87e2bbb46ff7ec9dd2746d694d41b6815fb43/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-23_12:07:46_2cf87e2bbb46ff7ec9dd2746d694d41b6815fb43/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_12:07:46_2cf87e2bbb46ff7ec9dd2746d694d41b6815fb43/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_12:07:46_2cf87e2bbb46ff7ec9dd2746d694d41b6815fb43/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-23_12:07:46_2cf87e2bbb46ff7ec9dd2746d694d41b6815fb43/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 8dcf2db982d41ed62f9cf7e2635bd39c667826aa..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_12:07:46_2cf87e2bbb46ff7ec9dd2746d694d41b6815fb43/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.697664,0.00343,1170.0 diff --git a/raw_results/2023-08-23_12:07:46_2cf87e2bbb46ff7ec9dd2746d694d41b6815fb43/pytorch_bert_inference/1/main.log b/raw_results/2023-08-23_12:07:46_2cf87e2bbb46ff7ec9dd2746d694d41b6815fb43/pytorch_bert_inference/1/main.log deleted file mode 100644 index e711d2e6597f57e4f33dfd2f579d8116e1541e7f..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-23_12:07:46_2cf87e2bbb46ff7ec9dd2746d694d41b6815fb43/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-23 12:58:28,298][benchmark][INFO] - Configuring inference benchmark -[2023-08-23 12:58:28,300][benchmark][INFO] - + Setting seed(42) -[2023-08-23 12:58:28,739][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-23 12:58:28,740][backend][INFO] - Configuring pytorch backend -[2023-08-23 12:58:28,740][backend][INFO] - + Checking initial device isolation -[2023-08-23 12:58:28,740][backend][INFO] - + Checking contineous device isolation -[2023-08-23 12:58:28,740][pytorch][INFO] - + Disabling gradients -[2023-08-23 12:58:28,740][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-23 12:58:28,859][pytorch][INFO] - + Turning on eval mode -[2023-08-23 12:58:28,859][inference][INFO] - Running inference benchmark -[2023-08-23 12:58:28,980][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-23 12:58:28,981][inference][INFO] - + Tracking forward pass peak memory -[2023-08-23 12:58:29,027][inference][INFO] - + Forward pass peak memory: 467.697664 (MB) -[2023-08-23 12:58:29,028][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-23 12:58:29,030][inference][INFO] - + Warming up the forward pass -[2023-08-23 12:58:29,080][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-23 12:58:34,127][inference][INFO] - + Forward pass latency: 3.43e-03 (s) -[2023-08-23 12:58:34,129][inference][INFO] - + Forward pass throughput: 1170.00 (samples/s) -[2023-08-23 12:58:34,129][inference][INFO] - Saving inference results -[2023-08-23 12:58:34,138][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-23_12:07:46_2cf87e2bbb46ff7ec9dd2746d694d41b6815fb43/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-23_12:07:46_2cf87e2bbb46ff7ec9dd2746d694d41b6815fb43/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_12:07:46_2cf87e2bbb46ff7ec9dd2746d694d41b6815fb43/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: 
hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_12:07:46_2cf87e2bbb46ff7ec9dd2746d694d41b6815fb43/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-23_12:07:46_2cf87e2bbb46ff7ec9dd2746d694d41b6815fb43/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 85df1d93e4de131aa3cc8abb5988345dd0447b5d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_12:07:46_2cf87e2bbb46ff7ec9dd2746d694d41b6815fb43/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
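
Note: the `runs/` and `sweeps/` directories in the hydra config above are built from two environment variables via OmegaConf's built-in `oc.env` resolver, which is why the `raw_results/` folders in this diff are named `<COMMIT_DATE_GMT>_<COMMIT_SHA>/<experiment_name>`. A minimal sketch of that resolution, using values taken from the paths in this diff:

```python
import os
from omegaconf import OmegaConf

# Values copied from the raw_results paths above; in CI they would be set by
# the benchmark runner before Hydra composes the config.
os.environ["COMMIT_DATE_GMT"] = "2023-08-23_12:07:46"
os.environ["COMMIT_SHA"] = "2cf87e2bbb46ff7ec9dd2746d694d41b6815fb43"

cfg = OmegaConf.create({
    "experiment_name": "pytorch_gpt2_inference",
    "sweep_dir": "sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}",
})
print(cfg.sweep_dir)
# sweeps/2023-08-23_12:07:46_2cf87e2bbb46ff7ec9dd2746d694d41b6815fb43/pytorch_gpt2_inference
```
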
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-23_12:07:46_2cf87e2bbb46ff7ec9dd2746d694d41b6815fb43/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-23_12:07:46_2cf87e2bbb46ff7ec9dd2746d694d41b6815fb43/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-23_12:07:46_2cf87e2bbb46ff7ec9dd2746d694d41b6815fb43/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_12:07:46_2cf87e2bbb46ff7ec9dd2746d694d41b6815fb43/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-23_12:07:46_2cf87e2bbb46ff7ec9dd2746d694d41b6815fb43/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-23_12:07:46_2cf87e2bbb46ff7ec9dd2746d694d41b6815fb43/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_12:07:46_2cf87e2bbb46ff7ec9dd2746d694d41b6815fb43/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_12:07:46_2cf87e2bbb46ff7ec9dd2746d694d41b6815fb43/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-23_12:07:46_2cf87e2bbb46ff7ec9dd2746d694d41b6815fb43/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index cb95f9ed34ff66dad01cba8475effb2a417045f2..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_12:07:46_2cf87e2bbb46ff7ec9dd2746d694d41b6815fb43/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.008384,0.00306,327.0,0.509,196.0 diff --git a/raw_results/2023-08-23_12:07:46_2cf87e2bbb46ff7ec9dd2746d694d41b6815fb43/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-23_12:07:46_2cf87e2bbb46ff7ec9dd2746d694d41b6815fb43/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 335c9443ec5d0126588b501a9497d97e2bdf8092..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_12:07:46_2cf87e2bbb46ff7ec9dd2746d694d41b6815fb43/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-23 12:58:39,017][benchmark][INFO] - Configuring inference benchmark -[2023-08-23 12:58:39,019][benchmark][INFO] - + Setting seed(42) -[2023-08-23 12:58:40,766][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-23 12:58:40,766][backend][INFO] - Configuring pytorch backend -[2023-08-23 12:58:40,766][backend][INFO] - + Checking initial device isolation -[2023-08-23 12:58:40,766][backend][INFO] - + Checking contineous device isolation -[2023-08-23 12:58:40,767][pytorch][INFO] - + Disabling gradients -[2023-08-23 12:58:40,767][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-23 12:58:41,418][pytorch][INFO] - + Turning on eval mode -[2023-08-23 12:58:41,419][inference][INFO] - Running inference benchmark -[2023-08-23 12:58:41,637][inference][INFO] - + Tracking forward pass peak memory -[2023-08-23 12:58:41,690][inference][INFO] - + Forward pass peak memory: 469.008384 (MB) -[2023-08-23 12:58:41,691][inference][INFO] - + Warming up the forward pass 
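
Note: the CSV rows above are internally consistent if the reported throughputs are derived as `batch_size / latency` for the forward pass and `batch_size * new_tokens / latency` for generation (an assumption about optimum-benchmark's accounting, but one the numbers support; latencies are rounded to three significant figures in the CSV, so small deviations are expected):

```python
# gpt2 row above: batch_size=1, new_tokens=100 (from hydra_config.yaml)
batch_size, new_tokens = 1, 100

forward_latency = 0.00306                           # s
print(batch_size / forward_latency)                 # ~326.8 vs reported 327.0 samples/s

generate_latency = 0.509                            # s
print(batch_size * new_tokens / generate_latency)   # ~196.5 vs reported 196.0 tokens/s

# bert batch_size=4 row further up: 4 / 0.00343 ~ 1166 vs reported 1170 samples/s
print(4 / 0.00343)
```
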
-[2023-08-23 12:58:41,728][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-23 12:58:46,782][inference][INFO] - + Forward pass latency: 3.06e-03 (s) -[2023-08-23 12:58:46,784][inference][INFO] - + Forward pass throughput: 327.00 (samples/s) -[2023-08-23 12:58:46,785][inference][INFO] - + Warming up the generation pass -[2023-08-23 12:58:47,280][inference][INFO] - + Tracking generation latency and throughput -[2023-08-23 12:58:52,368][inference][INFO] - + Generation pass latency: 5.09e-01 (s) -[2023-08-23 12:58:52,368][inference][INFO] - + Generation pass throughput: 196.00 (tokens/s) -[2023-08-23 12:58:52,368][inference][INFO] - Saving inference results -[2023-08-23 12:58:52,384][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-23_12:17:37_77cb2ab7921c5b2336916eb7874c807bf86ad33c/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-23_12:17:37_77cb2ab7921c5b2336916eb7874c807bf86ad33c/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_12:17:37_77cb2ab7921c5b2336916eb7874c807bf86ad33c/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_12:17:37_77cb2ab7921c5b2336916eb7874c807bf86ad33c/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-23_12:17:37_77cb2ab7921c5b2336916eb7874c807bf86ad33c/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index ff65936c15bf46e5ae606218b2d9a327836c3d8c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_12:17:37_77cb2ab7921c5b2336916eb7874c807bf86ad33c/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-23_12:17:37_77cb2ab7921c5b2336916eb7874c807bf86ad33c/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
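
Note: the `MULTIRUN` sweeps above use Hydra's `BasicSweeper` with `benchmark.input_shapes.batch_size: 1,4`, which is what produces the paired `0/` (batch_size=1) and `1/` (batch_size=4) job directories throughout `raw_results/`. A minimal sketch of that expansion, assuming the standard one-job-per-comma-separated-value behaviour (real Hydra also takes a cartesian product across multiple swept keys, omitted here):

```python
sweep = {"benchmark.input_shapes.batch_size": "1,4"}

for key, csv_values in sweep.items():
    for num, value in enumerate(csv_values.split(",")):
        # subdir follows ${hydra.job.num}, as configured above
        print({"num": num, "subdir": str(num), "override": f"{key}={value}"})
# {'num': 0, 'subdir': '0', 'override': 'benchmark.input_shapes.batch_size=1'}
# {'num': 1, 'subdir': '1', 'override': 'benchmark.input_shapes.batch_size=4'}
```
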
a/raw_results/2023-08-23_12:17:37_77cb2ab7921c5b2336916eb7874c807bf86ad33c/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-23_12:17:37_77cb2ab7921c5b2336916eb7874c807bf86ad33c/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_12:17:37_77cb2ab7921c5b2336916eb7874c807bf86ad33c/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-23_12:17:37_77cb2ab7921c5b2336916eb7874c807bf86ad33c/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-23_12:17:37_77cb2ab7921c5b2336916eb7874c807bf86ad33c/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_12:17:37_77cb2ab7921c5b2336916eb7874c807bf86ad33c/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_12:17:37_77cb2ab7921c5b2336916eb7874c807bf86ad33c/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-23_12:17:37_77cb2ab7921c5b2336916eb7874c807bf86ad33c/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 2207911e745d69f2e6162cc0d78f31247ca6d9dd..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_12:17:37_77cb2ab7921c5b2336916eb7874c807bf86ad33c/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.39455999999996,0.00312,321.0 diff --git a/raw_results/2023-08-23_12:17:37_77cb2ab7921c5b2336916eb7874c807bf86ad33c/pytorch_bert_inference/0/main.log b/raw_results/2023-08-23_12:17:37_77cb2ab7921c5b2336916eb7874c807bf86ad33c/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
5c79ad06e3d7ba73ea6cd4c2d6f854f8555f44ca..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_12:17:37_77cb2ab7921c5b2336916eb7874c807bf86ad33c/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-23 12:59:57,873][benchmark][INFO] - Configuring inference benchmark -[2023-08-23 12:59:57,873][benchmark][INFO] - + Setting seed(42) -[2023-08-23 12:59:59,075][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-23 12:59:59,076][backend][INFO] - Configuring pytorch backend -[2023-08-23 12:59:59,076][backend][INFO] - + Checking initial device isolation -[2023-08-23 12:59:59,076][backend][INFO] - + Checking contineous device isolation -[2023-08-23 12:59:59,076][pytorch][INFO] - + Disabling gradients -[2023-08-23 12:59:59,076][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-23 12:59:59,725][pytorch][INFO] - + Turning on eval mode -[2023-08-23 12:59:59,726][inference][INFO] - Running inference benchmark -[2023-08-23 12:59:59,854][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-23 12:59:59,855][inference][INFO] - + Tracking forward pass peak memory -[2023-08-23 12:59:59,921][inference][INFO] - + Forward pass peak memory: 467.39455999999996 (MB) -[2023-08-23 12:59:59,922][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-23 12:59:59,924][inference][INFO] - + Warming up the forward pass -[2023-08-23 12:59:59,961][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-23 13:00:05,013][inference][INFO] - + Forward pass latency: 3.12e-03 (s) -[2023-08-23 13:00:05,014][inference][INFO] - + Forward pass throughput: 321.00 (samples/s) -[2023-08-23 13:00:05,014][inference][INFO] - Saving inference results -[2023-08-23 13:00:05,024][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-23_12:17:37_77cb2ab7921c5b2336916eb7874c807bf86ad33c/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-23_12:17:37_77cb2ab7921c5b2336916eb7874c807bf86ad33c/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_12:17:37_77cb2ab7921c5b2336916eb7874c807bf86ad33c/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_12:17:37_77cb2ab7921c5b2336916eb7874c807bf86ad33c/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-23_12:17:37_77cb2ab7921c5b2336916eb7874c807bf86ad33c/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 13cf848912c364393f0d174fe44f11b00a4b97ba..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_12:17:37_77cb2ab7921c5b2336916eb7874c807bf86ad33c/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
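
Note: `.config/config.yaml` stores the unresolved interpolation `disable_grad: ${is_inference:${benchmark.name}}`, while the `hydra_config.yaml` saved next to the results stores the resolved value `true`. `is_inference` is not an OmegaConf built-in, so optimum-benchmark presumably registers an equivalent resolver before composing; the sketch below assumes its logic is a simple name check:

```python
from omegaconf import OmegaConf

# Resolver name taken from the config above; the lambda body is an assumption.
OmegaConf.register_new_resolver("is_inference", lambda name: name == "inference")

cfg = OmegaConf.create({
    "benchmark": {"name": "inference"},
    "backend": {
        "disable_grad": "${is_inference:${benchmark.name}}",
        "eval_mode": "${is_inference:${benchmark.name}}",
    },
})
print(cfg.backend.disable_grad, cfg.backend.eval_mode)  # True True
```
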
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-23_12:17:37_77cb2ab7921c5b2336916eb7874c807bf86ad33c/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-23_12:17:37_77cb2ab7921c5b2336916eb7874c807bf86ad33c/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-23_12:17:37_77cb2ab7921c5b2336916eb7874c807bf86ad33c/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_12:17:37_77cb2ab7921c5b2336916eb7874c807bf86ad33c/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-23_12:17:37_77cb2ab7921c5b2336916eb7874c807bf86ad33c/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-23_12:17:37_77cb2ab7921c5b2336916eb7874c807bf86ad33c/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_12:17:37_77cb2ab7921c5b2336916eb7874c807bf86ad33c/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_12:17:37_77cb2ab7921c5b2336916eb7874c807bf86ad33c/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-23_12:17:37_77cb2ab7921c5b2336916eb7874c807bf86ad33c/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index a1c5733614d8d94c4dead4451d503cfd2fb0bb05..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_12:17:37_77cb2ab7921c5b2336916eb7874c807bf86ad33c/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.496384,0.00345,1160.0 diff --git a/raw_results/2023-08-23_12:17:37_77cb2ab7921c5b2336916eb7874c807bf86ad33c/pytorch_bert_inference/1/main.log b/raw_results/2023-08-23_12:17:37_77cb2ab7921c5b2336916eb7874c807bf86ad33c/pytorch_bert_inference/1/main.log deleted file mode 100644 index 27a414c06fb37906c2d73828fcf018cec7ab8c87..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_12:17:37_77cb2ab7921c5b2336916eb7874c807bf86ad33c/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-23 13:00:05,423][benchmark][INFO] - Configuring inference benchmark -[2023-08-23 13:00:05,424][benchmark][INFO] - + Setting seed(42) -[2023-08-23 13:00:05,894][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-23 13:00:05,895][backend][INFO] - Configuring pytorch backend -[2023-08-23 13:00:05,895][backend][INFO] - + Checking initial device isolation -[2023-08-23 13:00:05,895][backend][INFO] - + Checking contineous device isolation -[2023-08-23 13:00:05,895][pytorch][INFO] - + Disabling gradients -[2023-08-23 13:00:05,896][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-23 13:00:06,005][pytorch][INFO] - + Turning on eval mode -[2023-08-23 13:00:06,006][inference][INFO] - Running inference benchmark -[2023-08-23 13:00:06,137][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-23 13:00:06,138][inference][INFO] - + Tracking forward pass 
peak memory -[2023-08-23 13:00:06,179][inference][INFO] - + Forward pass peak memory: 468.496384 (MB) -[2023-08-23 13:00:06,180][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-23 13:00:06,182][inference][INFO] - + Warming up the forward pass -[2023-08-23 13:00:06,218][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-23 13:00:11,263][inference][INFO] - + Forward pass latency: 3.45e-03 (s) -[2023-08-23 13:00:11,264][inference][INFO] - + Forward pass throughput: 1160.00 (samples/s) -[2023-08-23 13:00:11,265][inference][INFO] - Saving inference results -[2023-08-23 13:00:11,273][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-23_12:17:37_77cb2ab7921c5b2336916eb7874c807bf86ad33c/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-23_12:17:37_77cb2ab7921c5b2336916eb7874c807bf86ad33c/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_12:17:37_77cb2ab7921c5b2336916eb7874c807bf86ad33c/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_12:17:37_77cb2ab7921c5b2336916eb7874c807bf86ad33c/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-23_12:17:37_77cb2ab7921c5b2336916eb7874c807bf86ad33c/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index f1a7e14e5cbd8d8a5e9b7386880aa1bc3dabcd8e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_12:17:37_77cb2ab7921c5b2336916eb7874c807bf86ad33c/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-23_12:17:37_77cb2ab7921c5b2336916eb7874c807bf86ad33c/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-23_12:17:37_77cb2ab7921c5b2336916eb7874c807bf86ad33c/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-23_12:17:37_77cb2ab7921c5b2336916eb7874c807bf86ad33c/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_12:17:37_77cb2ab7921c5b2336916eb7874c807bf86ad33c/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-23_12:17:37_77cb2ab7921c5b2336916eb7874c807bf86ad33c/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-23_12:17:37_77cb2ab7921c5b2336916eb7874c807bf86ad33c/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_12:17:37_77cb2ab7921c5b2336916eb7874c807bf86ad33c/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_12:17:37_77cb2ab7921c5b2336916eb7874c807bf86ad33c/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-23_12:17:37_77cb2ab7921c5b2336916eb7874c807bf86ad33c/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index a5f86b97fce847ac17f00c190612a293e13563e5..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_12:17:37_77cb2ab7921c5b2336916eb7874c807bf86ad33c/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.479424,0.00372,269.0,0.544,184.0 diff --git a/raw_results/2023-08-23_12:17:37_77cb2ab7921c5b2336916eb7874c807bf86ad33c/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-23_12:17:37_77cb2ab7921c5b2336916eb7874c807bf86ad33c/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 6dcb893d4512e8f7ef9a8b38dcc818fe81e68d23..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-23_12:17:37_77cb2ab7921c5b2336916eb7874c807bf86ad33c/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-23 13:00:15,978][benchmark][INFO] - Configuring inference benchmark -[2023-08-23 13:00:15,979][benchmark][INFO] - + Setting seed(42) -[2023-08-23 13:00:17,849][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-23 13:00:17,850][backend][INFO] - Configuring pytorch backend -[2023-08-23 13:00:17,850][backend][INFO] - + Checking initial device isolation -[2023-08-23 13:00:17,850][backend][INFO] - + Checking contineous device isolation -[2023-08-23 13:00:17,850][pytorch][INFO] - + Disabling gradients -[2023-08-23 13:00:17,850][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-23 13:00:18,479][pytorch][INFO] - + Turning on eval mode -[2023-08-23 13:00:18,479][inference][INFO] - Running inference benchmark -[2023-08-23 13:00:18,679][inference][INFO] - + Tracking forward pass peak memory -[2023-08-23 13:00:18,730][inference][INFO] - + Forward pass peak memory: 469.479424 (MB) -[2023-08-23 13:00:18,731][inference][INFO] - + Warming up the forward pass -[2023-08-23 13:00:18,764][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-23 13:00:23,810][inference][INFO] - + Forward pass latency: 3.72e-03 (s) -[2023-08-23 13:00:23,812][inference][INFO] - + Forward pass throughput: 269.00 (samples/s) -[2023-08-23 13:00:23,812][inference][INFO] - + Warming up the generation pass -[2023-08-23 13:00:24,393][inference][INFO] - + Tracking generation latency and throughput -[2023-08-23 13:00:29,832][inference][INFO] - + Generation pass latency: 5.44e-01 (s) -[2023-08-23 13:00:29,833][inference][INFO] - + Generation pass throughput: 184.00 (tokens/s) -[2023-08-23 13:00:29,834][inference][INFO] - Saving inference results -[2023-08-23 13:00:29,845][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-23_12:21:07_8657ec68fc01c289245f3c71725353eef055fc3c/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-23_12:21:07_8657ec68fc01c289245f3c71725353eef055fc3c/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_12:21:07_8657ec68fc01c289245f3c71725353eef055fc3c/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_12:21:07_8657ec68fc01c289245f3c71725353eef055fc3c/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-23_12:21:07_8657ec68fc01c289245f3c71725353eef055fc3c/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 0d8170c26a32c7e139aa896c7e464caa960298fc..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_12:21:07_8657ec68fc01c289245f3c71725353eef055fc3c/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
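
Note: the `main.log` files reproduced above describe the same load sequence each time: infer the AutoModel class from `task`, load with `hub_kwargs`, disable gradients, switch to eval mode. A rough reconstruction under those log messages (the exact calls inside optimum-benchmark's PyTorch backend may differ):

```python
from transformers import AutoModelForSequenceClassification

# "Infered AutoModel class AutoModelForSequenceClassification for task
#  text-classification and model_type bert"
model = AutoModelForSequenceClassification.from_pretrained(
    "hf-internal-testing/tiny-random-bert",  # model
    revision="main",                         # hub_kwargs.revision
    cache_dir=None,                          # hub_kwargs.cache_dir
    force_download=False,                    # hub_kwargs.force_download
    local_files_only=False,                  # hub_kwargs.local_files_only
)
model.eval()                                 # eval_mode: true
for param in model.parameters():             # disable_grad: true (one possible
    param.requires_grad_(False)              # way to disable gradients)
```
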
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-23_12:21:07_8657ec68fc01c289245f3c71725353eef055fc3c/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-23_12:21:07_8657ec68fc01c289245f3c71725353eef055fc3c/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-23_12:21:07_8657ec68fc01c289245f3c71725353eef055fc3c/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_12:21:07_8657ec68fc01c289245f3c71725353eef055fc3c/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-23_12:21:07_8657ec68fc01c289245f3c71725353eef055fc3c/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-23_12:21:07_8657ec68fc01c289245f3c71725353eef055fc3c/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_12:21:07_8657ec68fc01c289245f3c71725353eef055fc3c/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_12:21:07_8657ec68fc01c289245f3c71725353eef055fc3c/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-23_12:21:07_8657ec68fc01c289245f3c71725353eef055fc3c/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index bbd33432a9813f173b7ae8094e120d78c5e67a15..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_12:21:07_8657ec68fc01c289245f3c71725353eef055fc3c/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.66956799999997,0.00322,311.0 diff --git a/raw_results/2023-08-23_12:21:07_8657ec68fc01c289245f3c71725353eef055fc3c/pytorch_bert_inference/0/main.log b/raw_results/2023-08-23_12:21:07_8657ec68fc01c289245f3c71725353eef055fc3c/pytorch_bert_inference/0/main.log deleted file mode 100644 index 3f66382883659300a1a68d00086897e29f16f218..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_12:21:07_8657ec68fc01c289245f3c71725353eef055fc3c/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-23 13:01:36,013][benchmark][INFO] - Configuring inference benchmark -[2023-08-23 13:01:36,014][benchmark][INFO] - + Setting seed(42) -[2023-08-23 13:01:37,455][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-23 13:01:37,455][backend][INFO] - Configuring pytorch backend -[2023-08-23 13:01:37,455][backend][INFO] - + Checking initial device isolation -[2023-08-23 13:01:37,456][backend][INFO] - + Checking contineous device isolation -[2023-08-23 13:01:37,456][pytorch][INFO] - + Disabling gradients -[2023-08-23 13:01:37,456][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-23 13:01:38,140][pytorch][INFO] - + Turning on eval mode -[2023-08-23 13:01:38,141][inference][INFO] - Running inference benchmark -[2023-08-23 13:01:38,260][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-23 13:01:38,262][inference][INFO] - + Tracking forward 
pass peak memory -[2023-08-23 13:01:38,318][inference][INFO] - + Forward pass peak memory: 466.66956799999997 (MB) -[2023-08-23 13:01:38,320][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-23 13:01:38,321][inference][INFO] - + Warming up the forward pass -[2023-08-23 13:01:38,354][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-23 13:01:43,402][inference][INFO] - + Forward pass latency: 3.22e-03 (s) -[2023-08-23 13:01:43,404][inference][INFO] - + Forward pass throughput: 311.00 (samples/s) -[2023-08-23 13:01:43,404][inference][INFO] - Saving inference results -[2023-08-23 13:01:43,415][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-23_12:21:07_8657ec68fc01c289245f3c71725353eef055fc3c/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-23_12:21:07_8657ec68fc01c289245f3c71725353eef055fc3c/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_12:21:07_8657ec68fc01c289245f3c71725353eef055fc3c/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_12:21:07_8657ec68fc01c289245f3c71725353eef055fc3c/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-23_12:21:07_8657ec68fc01c289245f3c71725353eef055fc3c/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index c686b4ab4f4835950057637b7fbd484ca307d067..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_12:21:07_8657ec68fc01c289245f3c71725353eef055fc3c/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} 
- launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-23_12:21:07_8657ec68fc01c289245f3c71725353eef055fc3c/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-23_12:21:07_8657ec68fc01c289245f3c71725353eef055fc3c/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-23_12:21:07_8657ec68fc01c289245f3c71725353eef055fc3c/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_12:21:07_8657ec68fc01c289245f3c71725353eef055fc3c/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-23_12:21:07_8657ec68fc01c289245f3c71725353eef055fc3c/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-23_12:21:07_8657ec68fc01c289245f3c71725353eef055fc3c/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_12:21:07_8657ec68fc01c289245f3c71725353eef055fc3c/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_12:21:07_8657ec68fc01c289245f3c71725353eef055fc3c/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-23_12:21:07_8657ec68fc01c289245f3c71725353eef055fc3c/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 9bdc04aa522ea0ad3b94d59e08bbe9f3ca2496d6..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_12:21:07_8657ec68fc01c289245f3c71725353eef055fc3c/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.705856,0.00353,1130.0 diff --git a/raw_results/2023-08-23_12:21:07_8657ec68fc01c289245f3c71725353eef055fc3c/pytorch_bert_inference/1/main.log b/raw_results/2023-08-23_12:21:07_8657ec68fc01c289245f3c71725353eef055fc3c/pytorch_bert_inference/1/main.log deleted file mode 100644 index 
7ca89298b60188bd3a07bc1c23daf1333f4e7830..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_12:21:07_8657ec68fc01c289245f3c71725353eef055fc3c/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-23 13:01:43,807][benchmark][INFO] - Configuring inference benchmark -[2023-08-23 13:01:43,807][benchmark][INFO] - + Setting seed(42) -[2023-08-23 13:01:44,398][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-23 13:01:44,398][backend][INFO] - Configuring pytorch backend -[2023-08-23 13:01:44,398][backend][INFO] - + Checking initial device isolation -[2023-08-23 13:01:44,399][backend][INFO] - + Checking contineous device isolation -[2023-08-23 13:01:44,399][pytorch][INFO] - + Disabling gradients -[2023-08-23 13:01:44,399][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-23 13:01:44,512][pytorch][INFO] - + Turning on eval mode -[2023-08-23 13:01:44,512][inference][INFO] - Running inference benchmark -[2023-08-23 13:01:44,639][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-23 13:01:44,640][inference][INFO] - + Tracking forward pass peak memory -[2023-08-23 13:01:44,683][inference][INFO] - + Forward pass peak memory: 467.705856 (MB) -[2023-08-23 13:01:44,684][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-23 13:01:44,686][inference][INFO] - + Warming up the forward pass -[2023-08-23 13:01:44,723][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-23 13:01:49,766][inference][INFO] - + Forward pass latency: 3.53e-03 (s) -[2023-08-23 13:01:49,767][inference][INFO] - + Forward pass throughput: 1130.00 (samples/s) -[2023-08-23 13:01:49,768][inference][INFO] - Saving inference results -[2023-08-23 13:01:49,775][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-23_12:21:07_8657ec68fc01c289245f3c71725353eef055fc3c/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-23_12:21:07_8657ec68fc01c289245f3c71725353eef055fc3c/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_12:21:07_8657ec68fc01c289245f3c71725353eef055fc3c/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_12:21:07_8657ec68fc01c289245f3c71725353eef055fc3c/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-23_12:21:07_8657ec68fc01c289245f3c71725353eef055fc3c/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index a6d1b7568ce3f4c474a606a938a0aa46ee1bd786..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_12:21:07_8657ec68fc01c289245f3c71725353eef055fc3c/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-23_12:21:07_8657ec68fc01c289245f3c71725353eef055fc3c/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-23_12:21:07_8657ec68fc01c289245f3c71725353eef055fc3c/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-23_12:21:07_8657ec68fc01c289245f3c71725353eef055fc3c/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_12:21:07_8657ec68fc01c289245f3c71725353eef055fc3c/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-23_12:21:07_8657ec68fc01c289245f3c71725353eef055fc3c/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-23_12:21:07_8657ec68fc01c289245f3c71725353eef055fc3c/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_12:21:07_8657ec68fc01c289245f3c71725353eef055fc3c/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_12:21:07_8657ec68fc01c289245f3c71725353eef055fc3c/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-23_12:21:07_8657ec68fc01c289245f3c71725353eef055fc3c/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 3b49cbc58cc305b5c6e658c164d2690a4d48500e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_12:21:07_8657ec68fc01c289245f3c71725353eef055fc3c/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.33196799999996,0.00383,261.0,0.533,188.0 diff --git a/raw_results/2023-08-23_12:21:07_8657ec68fc01c289245f3c71725353eef055fc3c/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-23_12:21:07_8657ec68fc01c289245f3c71725353eef055fc3c/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index b230ef4e8d8f8b8a7b4c360b15d93269c31819bd..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_12:21:07_8657ec68fc01c289245f3c71725353eef055fc3c/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-23 13:01:54,605][benchmark][INFO] - Configuring inference benchmark -[2023-08-23 13:01:54,606][benchmark][INFO] - + Setting seed(42) -[2023-08-23 13:01:56,603][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-23 13:01:56,603][backend][INFO] - Configuring pytorch backend -[2023-08-23 13:01:56,604][backend][INFO] - + Checking initial device isolation -[2023-08-23 13:01:56,604][backend][INFO] - + Checking contineous device isolation -[2023-08-23 13:01:56,604][pytorch][INFO] - + Disabling gradients -[2023-08-23 13:01:56,604][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-23 13:01:57,253][pytorch][INFO] - + Turning on eval mode -[2023-08-23 13:01:57,254][inference][INFO] - Running inference benchmark -[2023-08-23 13:01:57,451][inference][INFO] - + Tracking forward pass peak memory -[2023-08-23 13:01:57,501][inference][INFO] - + Forward pass peak memory: 469.33196799999996 (MB) -[2023-08-23 13:01:57,502][inference][INFO] - + Warming up the 
forward pass -[2023-08-23 13:01:57,535][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-23 13:02:02,579][inference][INFO] - + Forward pass latency: 3.83e-03 (s) -[2023-08-23 13:02:02,581][inference][INFO] - + Forward pass throughput: 261.00 (samples/s) -[2023-08-23 13:02:02,582][inference][INFO] - + Warming up the generation pass -[2023-08-23 13:02:03,170][inference][INFO] - + Tracking generation latency and throughput -[2023-08-23 13:02:08,503][inference][INFO] - + Generation pass latency: 5.33e-01 (s) -[2023-08-23 13:02:08,504][inference][INFO] - + Generation pass throughput: 188.00 (tokens/s) -[2023-08-23 13:02:08,504][inference][INFO] - Saving inference results -[2023-08-23 13:02:08,515][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-23_14:39:28_2189a7f54a5ec10a7559a93fa7e6eaca527d2941/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-23_14:39:28_2189a7f54a5ec10a7559a93fa7e6eaca527d2941/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_14:39:28_2189a7f54a5ec10a7559a93fa7e6eaca527d2941/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_14:39:28_2189a7f54a5ec10a7559a93fa7e6eaca527d2941/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-23_14:39:28_2189a7f54a5ec10a7559a93fa7e6eaca527d2941/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 61fad12008ab118e4b14955808fde98974d19066..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_14:39:28_2189a7f54a5ec10a7559a93fa7e6eaca527d2941/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-23_14:39:28_2189a7f54a5ec10a7559a93fa7e6eaca527d2941/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-23_14:39:28_2189a7f54a5ec10a7559a93fa7e6eaca527d2941/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-23_14:39:28_2189a7f54a5ec10a7559a93fa7e6eaca527d2941/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_14:39:28_2189a7f54a5ec10a7559a93fa7e6eaca527d2941/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-23_14:39:28_2189a7f54a5ec10a7559a93fa7e6eaca527d2941/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-23_14:39:28_2189a7f54a5ec10a7559a93fa7e6eaca527d2941/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_14:39:28_2189a7f54a5ec10a7559a93fa7e6eaca527d2941/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_14:39:28_2189a7f54a5ec10a7559a93fa7e6eaca527d2941/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-23_14:39:28_2189a7f54a5ec10a7559a93fa7e6eaca527d2941/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index b702afb1a86bba952d3f00152479e226784e6c63..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_14:39:28_2189a7f54a5ec10a7559a93fa7e6eaca527d2941/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.263488,0.0038,263.0 diff --git a/raw_results/2023-08-23_14:39:28_2189a7f54a5ec10a7559a93fa7e6eaca527d2941/pytorch_bert_inference/0/main.log b/raw_results/2023-08-23_14:39:28_2189a7f54a5ec10a7559a93fa7e6eaca527d2941/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
01f9354a572326b106ee3f1de7da9bdb0e54e88e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_14:39:28_2189a7f54a5ec10a7559a93fa7e6eaca527d2941/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-23 14:49:57,809][benchmark][INFO] - Configuring inference benchmark -[2023-08-23 14:49:57,810][benchmark][INFO] - + Setting seed(42) -[2023-08-23 14:49:59,185][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-23 14:49:59,186][backend][INFO] - Configuring pytorch backend -[2023-08-23 14:49:59,186][backend][INFO] - + Checking initial device isolation -[2023-08-23 14:49:59,186][backend][INFO] - + Checking contineous device isolation -[2023-08-23 14:49:59,186][pytorch][INFO] - + Disabling gradients -[2023-08-23 14:49:59,186][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-23 14:49:59,813][pytorch][INFO] - + Turning on eval mode -[2023-08-23 14:49:59,813][inference][INFO] - Running inference benchmark -[2023-08-23 14:50:00,100][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-23 14:50:00,101][inference][INFO] - + Tracking forward pass peak memory -[2023-08-23 14:50:00,168][inference][INFO] - + Forward pass peak memory: 467.263488 (MB) -[2023-08-23 14:50:00,169][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-23 14:50:00,171][inference][INFO] - + Warming up the forward pass -[2023-08-23 14:50:00,208][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-23 14:50:05,254][inference][INFO] - + Forward pass latency: 3.80e-03 (s) -[2023-08-23 14:50:05,256][inference][INFO] - + Forward pass throughput: 263.00 (samples/s) -[2023-08-23 14:50:05,256][inference][INFO] - Saving inference results -[2023-08-23 14:50:05,267][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-23_14:39:28_2189a7f54a5ec10a7559a93fa7e6eaca527d2941/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-23_14:39:28_2189a7f54a5ec10a7559a93fa7e6eaca527d2941/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_14:39:28_2189a7f54a5ec10a7559a93fa7e6eaca527d2941/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_14:39:28_2189a7f54a5ec10a7559a93fa7e6eaca527d2941/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-23_14:39:28_2189a7f54a5ec10a7559a93fa7e6eaca527d2941/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 42ab332b70f9f26032f6e35f3c6f1b7702ae4ddd..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_14:39:28_2189a7f54a5ec10a7559a93fa7e6eaca527d2941/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-23_14:39:28_2189a7f54a5ec10a7559a93fa7e6eaca527d2941/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-23_14:39:28_2189a7f54a5ec10a7559a93fa7e6eaca527d2941/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-23_14:39:28_2189a7f54a5ec10a7559a93fa7e6eaca527d2941/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_14:39:28_2189a7f54a5ec10a7559a93fa7e6eaca527d2941/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-23_14:39:28_2189a7f54a5ec10a7559a93fa7e6eaca527d2941/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-23_14:39:28_2189a7f54a5ec10a7559a93fa7e6eaca527d2941/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_14:39:28_2189a7f54a5ec10a7559a93fa7e6eaca527d2941/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_14:39:28_2189a7f54a5ec10a7559a93fa7e6eaca527d2941/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-23_14:39:28_2189a7f54a5ec10a7559a93fa7e6eaca527d2941/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 1f2267cd3e195a7b7acb1bda51ca422c50763d1d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_14:39:28_2189a7f54a5ec10a7559a93fa7e6eaca527d2941/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.31615999999997,0.00436,917.0 diff --git a/raw_results/2023-08-23_14:39:28_2189a7f54a5ec10a7559a93fa7e6eaca527d2941/pytorch_bert_inference/1/main.log b/raw_results/2023-08-23_14:39:28_2189a7f54a5ec10a7559a93fa7e6eaca527d2941/pytorch_bert_inference/1/main.log deleted file mode 100644 index 3d862e6cb485a5239ec3a260606d460e9e2519e3..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_14:39:28_2189a7f54a5ec10a7559a93fa7e6eaca527d2941/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-23 14:50:05,651][benchmark][INFO] - Configuring inference benchmark -[2023-08-23 14:50:05,652][benchmark][INFO] - + Setting seed(42) -[2023-08-23 14:50:06,094][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-23 14:50:06,094][backend][INFO] - Configuring pytorch backend -[2023-08-23 14:50:06,094][backend][INFO] - + Checking initial device isolation -[2023-08-23 14:50:06,094][backend][INFO] - + Checking contineous device isolation -[2023-08-23 14:50:06,095][pytorch][INFO] - + Disabling gradients -[2023-08-23 14:50:06,095][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-23 14:50:06,220][pytorch][INFO] - + Turning on eval mode -[2023-08-23 14:50:06,221][inference][INFO] - Running inference benchmark -[2023-08-23 14:50:06,350][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-23 14:50:06,351][inference][INFO] - + Tracking forward 
pass peak memory -[2023-08-23 14:50:06,396][inference][INFO] - + Forward pass peak memory: 468.31615999999997 (MB) -[2023-08-23 14:50:06,397][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-23 14:50:06,399][inference][INFO] - + Warming up the forward pass -[2023-08-23 14:50:06,442][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-23 14:50:11,483][inference][INFO] - + Forward pass latency: 4.36e-03 (s) -[2023-08-23 14:50:11,484][inference][INFO] - + Forward pass throughput: 917.00 (samples/s) -[2023-08-23 14:50:11,484][inference][INFO] - Saving inference results -[2023-08-23 14:50:11,492][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-23_14:39:28_2189a7f54a5ec10a7559a93fa7e6eaca527d2941/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-23_14:39:28_2189a7f54a5ec10a7559a93fa7e6eaca527d2941/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_14:39:28_2189a7f54a5ec10a7559a93fa7e6eaca527d2941/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_14:39:28_2189a7f54a5ec10a7559a93fa7e6eaca527d2941/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-23_14:39:28_2189a7f54a5ec10a7559a93fa7e6eaca527d2941/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index f58d2dd9cde6eeb20d302991273f136112f6c057..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_14:39:28_2189a7f54a5ec10a7559a93fa7e6eaca527d2941/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - 
launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-23_14:39:28_2189a7f54a5ec10a7559a93fa7e6eaca527d2941/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-23_14:39:28_2189a7f54a5ec10a7559a93fa7e6eaca527d2941/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-23_14:39:28_2189a7f54a5ec10a7559a93fa7e6eaca527d2941/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_14:39:28_2189a7f54a5ec10a7559a93fa7e6eaca527d2941/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-23_14:39:28_2189a7f54a5ec10a7559a93fa7e6eaca527d2941/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-23_14:39:28_2189a7f54a5ec10a7559a93fa7e6eaca527d2941/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_14:39:28_2189a7f54a5ec10a7559a93fa7e6eaca527d2941/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_14:39:28_2189a7f54a5ec10a7559a93fa7e6eaca527d2941/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-23_14:39:28_2189a7f54a5ec10a7559a93fa7e6eaca527d2941/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index d10bdd53648fe28c0d780b52d8930d7c243ecbbf..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_14:39:28_2189a7f54a5ec10a7559a93fa7e6eaca527d2941/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.10668799999996,0.00335,299.0,0.483,207.0 diff --git a/raw_results/2023-08-23_14:39:28_2189a7f54a5ec10a7559a93fa7e6eaca527d2941/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-23_14:39:28_2189a7f54a5ec10a7559a93fa7e6eaca527d2941/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 0c0f1be7374d76aa1a8a72ed5653ceea782eb4fb..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-23_14:39:28_2189a7f54a5ec10a7559a93fa7e6eaca527d2941/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-23 14:50:16,240][benchmark][INFO] - Configuring inference benchmark -[2023-08-23 14:50:16,241][benchmark][INFO] - + Setting seed(42) -[2023-08-23 14:50:17,696][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-23 14:50:17,696][backend][INFO] - Configuring pytorch backend -[2023-08-23 14:50:17,696][backend][INFO] - + Checking initial device isolation -[2023-08-23 14:50:17,696][backend][INFO] - + Checking contineous device isolation -[2023-08-23 14:50:17,696][pytorch][INFO] - + Disabling gradients -[2023-08-23 14:50:17,697][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-23 14:50:18,340][pytorch][INFO] - + Turning on eval mode -[2023-08-23 14:50:18,340][inference][INFO] - Running inference benchmark -[2023-08-23 14:50:18,553][inference][INFO] - + Tracking forward pass peak memory -[2023-08-23 14:50:18,603][inference][INFO] - + Forward pass peak memory: 469.10668799999996 (MB) -[2023-08-23 14:50:18,605][inference][INFO] - + Warming up the forward pass -[2023-08-23 14:50:18,638][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-23 14:50:23,688][inference][INFO] - + Forward pass latency: 3.35e-03 (s) -[2023-08-23 14:50:23,690][inference][INFO] - + Forward pass throughput: 299.00 (samples/s) -[2023-08-23 14:50:23,691][inference][INFO] - + Warming up the generation pass -[2023-08-23 14:50:24,182][inference][INFO] - + Tracking generation latency and throughput -[2023-08-23 14:50:29,498][inference][INFO] - + Generation pass latency: 4.83e-01 (s) -[2023-08-23 14:50:29,499][inference][INFO] - + Generation pass throughput: 207.00 (tokens/s) -[2023-08-23 14:50:29,499][inference][INFO] - Saving inference results -[2023-08-23 14:50:29,511][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-23_16:25:28_6add3b313defc35b5d8ae3d946131aeb625e0441/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-23_16:25:28_6add3b313defc35b5d8ae3d946131aeb625e0441/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_16:25:28_6add3b313defc35b5d8ae3d946131aeb625e0441/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_16:25:28_6add3b313defc35b5d8ae3d946131aeb625e0441/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-23_16:25:28_6add3b313defc35b5d8ae3d946131aeb625e0441/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 6e216cb2383ba3679c08c61611ebde9b691ad3a3..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_16:25:28_6add3b313defc35b5d8ae3d946131aeb625e0441/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-23_16:25:28_6add3b313defc35b5d8ae3d946131aeb625e0441/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-23_16:25:28_6add3b313defc35b5d8ae3d946131aeb625e0441/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-23_16:25:28_6add3b313defc35b5d8ae3d946131aeb625e0441/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_16:25:28_6add3b313defc35b5d8ae3d946131aeb625e0441/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-23_16:25:28_6add3b313defc35b5d8ae3d946131aeb625e0441/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-23_16:25:28_6add3b313defc35b5d8ae3d946131aeb625e0441/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_16:25:28_6add3b313defc35b5d8ae3d946131aeb625e0441/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_16:25:28_6add3b313defc35b5d8ae3d946131aeb625e0441/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-23_16:25:28_6add3b313defc35b5d8ae3d946131aeb625e0441/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index bfd45db960ec9f16cbe61abf4eb8010dc9f0d976..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_16:25:28_6add3b313defc35b5d8ae3d946131aeb625e0441/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.83750399999997,0.0032,312.0 diff --git a/raw_results/2023-08-23_16:25:28_6add3b313defc35b5d8ae3d946131aeb625e0441/pytorch_bert_inference/0/main.log b/raw_results/2023-08-23_16:25:28_6add3b313defc35b5d8ae3d946131aeb625e0441/pytorch_bert_inference/0/main.log deleted file mode 100644 index 93596bea7deb6e465be0f605e1cf9ad894b605c9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_16:25:28_6add3b313defc35b5d8ae3d946131aeb625e0441/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-23 16:49:47,479][benchmark][INFO] - Configuring inference benchmark -[2023-08-23 16:49:47,480][benchmark][INFO] - + Setting seed(42) -[2023-08-23 16:49:49,269][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-23 16:49:49,270][backend][INFO] - Configuring pytorch backend -[2023-08-23 16:49:49,270][backend][INFO] - + Checking initial device isolation -[2023-08-23 16:49:49,270][backend][INFO] - + Checking contineous device isolation -[2023-08-23 16:49:49,270][pytorch][INFO] - + Disabling gradients -[2023-08-23 16:49:49,270][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-23 16:49:49,898][pytorch][INFO] - + Turning on eval mode -[2023-08-23 16:49:49,899][inference][INFO] - Running inference benchmark -[2023-08-23 16:49:50,022][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-23 16:49:50,024][inference][INFO] - + Tracking forward 
pass peak memory -[2023-08-23 16:49:50,086][inference][INFO] - + Forward pass peak memory: 466.83750399999997 (MB) -[2023-08-23 16:49:50,087][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-23 16:49:50,089][inference][INFO] - + Warming up the forward pass -[2023-08-23 16:49:50,122][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-23 16:49:55,172][inference][INFO] - + Forward pass latency: 3.20e-03 (s) -[2023-08-23 16:49:55,174][inference][INFO] - + Forward pass throughput: 312.00 (samples/s) -[2023-08-23 16:49:55,174][inference][INFO] - Saving inference results -[2023-08-23 16:49:55,185][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-23_16:25:28_6add3b313defc35b5d8ae3d946131aeb625e0441/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-23_16:25:28_6add3b313defc35b5d8ae3d946131aeb625e0441/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_16:25:28_6add3b313defc35b5d8ae3d946131aeb625e0441/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_16:25:28_6add3b313defc35b5d8ae3d946131aeb625e0441/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-23_16:25:28_6add3b313defc35b5d8ae3d946131aeb625e0441/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 743b2872510a8f1f0af6a729f1538500765237e4..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_16:25:28_6add3b313defc35b5d8ae3d946131aeb625e0441/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} 
- launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-23_16:25:28_6add3b313defc35b5d8ae3d946131aeb625e0441/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-23_16:25:28_6add3b313defc35b5d8ae3d946131aeb625e0441/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-23_16:25:28_6add3b313defc35b5d8ae3d946131aeb625e0441/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_16:25:28_6add3b313defc35b5d8ae3d946131aeb625e0441/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-23_16:25:28_6add3b313defc35b5d8ae3d946131aeb625e0441/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-23_16:25:28_6add3b313defc35b5d8ae3d946131aeb625e0441/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_16:25:28_6add3b313defc35b5d8ae3d946131aeb625e0441/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_16:25:28_6add3b313defc35b5d8ae3d946131aeb625e0441/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-23_16:25:28_6add3b313defc35b5d8ae3d946131aeb625e0441/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 5a85adf667de646bf3dd13f93f63b9cf58fc0030..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_16:25:28_6add3b313defc35b5d8ae3d946131aeb625e0441/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.96799999999996,0.00422,948.0 diff --git a/raw_results/2023-08-23_16:25:28_6add3b313defc35b5d8ae3d946131aeb625e0441/pytorch_bert_inference/1/main.log b/raw_results/2023-08-23_16:25:28_6add3b313defc35b5d8ae3d946131aeb625e0441/pytorch_bert_inference/1/main.log deleted file mode 100644 index 
378c0e2c3f56ab01984dfc54bf1b3ad38c6f120f..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_16:25:28_6add3b313defc35b5d8ae3d946131aeb625e0441/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-23 16:49:55,566][benchmark][INFO] - Configuring inference benchmark -[2023-08-23 16:49:55,567][benchmark][INFO] - + Setting seed(42) -[2023-08-23 16:49:56,009][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-23 16:49:56,009][backend][INFO] - Configuring pytorch backend -[2023-08-23 16:49:56,009][backend][INFO] - + Checking initial device isolation -[2023-08-23 16:49:56,009][backend][INFO] - + Checking contineous device isolation -[2023-08-23 16:49:56,010][pytorch][INFO] - + Disabling gradients -[2023-08-23 16:49:56,010][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-23 16:49:56,130][pytorch][INFO] - + Turning on eval mode -[2023-08-23 16:49:56,131][inference][INFO] - Running inference benchmark -[2023-08-23 16:49:56,257][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-23 16:49:56,258][inference][INFO] - + Tracking forward pass peak memory -[2023-08-23 16:49:56,302][inference][INFO] - + Forward pass peak memory: 467.96799999999996 (MB) -[2023-08-23 16:49:56,303][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-23 16:49:56,305][inference][INFO] - + Warming up the forward pass -[2023-08-23 16:49:56,348][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-23 16:50:01,391][inference][INFO] - + Forward pass latency: 4.22e-03 (s) -[2023-08-23 16:50:01,392][inference][INFO] - + Forward pass throughput: 948.00 (samples/s) -[2023-08-23 16:50:01,392][inference][INFO] - Saving inference results -[2023-08-23 16:50:01,401][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-23_16:25:28_6add3b313defc35b5d8ae3d946131aeb625e0441/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-23_16:25:28_6add3b313defc35b5d8ae3d946131aeb625e0441/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_16:25:28_6add3b313defc35b5d8ae3d946131aeb625e0441/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_16:25:28_6add3b313defc35b5d8ae3d946131aeb625e0441/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-23_16:25:28_6add3b313defc35b5d8ae3d946131aeb625e0441/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index e0028321d98eb9c5fc4997de51f767c85994bb02..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_16:25:28_6add3b313defc35b5d8ae3d946131aeb625e0441/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-23_16:25:28_6add3b313defc35b5d8ae3d946131aeb625e0441/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-23_16:25:28_6add3b313defc35b5d8ae3d946131aeb625e0441/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-23_16:25:28_6add3b313defc35b5d8ae3d946131aeb625e0441/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_16:25:28_6add3b313defc35b5d8ae3d946131aeb625e0441/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-23_16:25:28_6add3b313defc35b5d8ae3d946131aeb625e0441/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-23_16:25:28_6add3b313defc35b5d8ae3d946131aeb625e0441/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_16:25:28_6add3b313defc35b5d8ae3d946131aeb625e0441/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_16:25:28_6add3b313defc35b5d8ae3d946131aeb625e0441/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-23_16:25:28_6add3b313defc35b5d8ae3d946131aeb625e0441/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 21a180162d176cdfa298c89bcb8659f861a6cc91..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_16:25:28_6add3b313defc35b5d8ae3d946131aeb625e0441/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.123072,0.00387,258.0,0.494,202.0 diff --git a/raw_results/2023-08-23_16:25:28_6add3b313defc35b5d8ae3d946131aeb625e0441/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-23_16:25:28_6add3b313defc35b5d8ae3d946131aeb625e0441/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 2a4f03041e720abdb7e8e13c12b25f65bf075696..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_16:25:28_6add3b313defc35b5d8ae3d946131aeb625e0441/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-23 16:50:06,166][benchmark][INFO] - Configuring inference benchmark -[2023-08-23 16:50:06,167][benchmark][INFO] - + Setting seed(42) -[2023-08-23 16:50:07,603][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-23 16:50:07,603][backend][INFO] - Configuring pytorch backend -[2023-08-23 16:50:07,603][backend][INFO] - + Checking initial device isolation -[2023-08-23 16:50:07,603][backend][INFO] - + Checking contineous device isolation -[2023-08-23 16:50:07,603][pytorch][INFO] - + Disabling gradients -[2023-08-23 16:50:07,604][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-23 16:50:08,287][pytorch][INFO] - + Turning on eval mode -[2023-08-23 16:50:08,288][inference][INFO] - Running inference benchmark -[2023-08-23 16:50:08,517][inference][INFO] - + Tracking forward pass peak memory -[2023-08-23 16:50:08,567][inference][INFO] - + Forward pass peak memory: 469.123072 (MB) -[2023-08-23 16:50:08,569][inference][INFO] - + Warming up the forward pass 
-[2023-08-23 16:50:08,603][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-23 16:50:13,646][inference][INFO] - + Forward pass latency: 3.87e-03 (s) -[2023-08-23 16:50:13,648][inference][INFO] - + Forward pass throughput: 258.00 (samples/s) -[2023-08-23 16:50:13,649][inference][INFO] - + Warming up the generation pass -[2023-08-23 16:50:14,148][inference][INFO] - + Tracking generation latency and throughput -[2023-08-23 16:50:19,584][inference][INFO] - + Generation pass latency: 4.94e-01 (s) -[2023-08-23 16:50:19,585][inference][INFO] - + Generation pass throughput: 202.00 (tokens/s) -[2023-08-23 16:50:19,585][inference][INFO] - Saving inference results -[2023-08-23 16:50:19,598][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-23_18:09:14_656e17f6f7eded9df87ad59cbd064fdf5f44f708/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-23_18:09:14_656e17f6f7eded9df87ad59cbd064fdf5f44f708/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_18:09:14_656e17f6f7eded9df87ad59cbd064fdf5f44f708/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_18:09:14_656e17f6f7eded9df87ad59cbd064fdf5f44f708/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-23_18:09:14_656e17f6f7eded9df87ad59cbd064fdf5f44f708/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 8134c83b702f8d4a2c71572dd3ca67ad8331c663..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_18:09:14_656e17f6f7eded9df87ad59cbd064fdf5f44f708/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-23_18:09:14_656e17f6f7eded9df87ad59cbd064fdf5f44f708/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-23_18:09:14_656e17f6f7eded9df87ad59cbd064fdf5f44f708/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-23_18:09:14_656e17f6f7eded9df87ad59cbd064fdf5f44f708/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_18:09:14_656e17f6f7eded9df87ad59cbd064fdf5f44f708/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-23_18:09:14_656e17f6f7eded9df87ad59cbd064fdf5f44f708/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-23_18:09:14_656e17f6f7eded9df87ad59cbd064fdf5f44f708/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_18:09:14_656e17f6f7eded9df87ad59cbd064fdf5f44f708/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_18:09:14_656e17f6f7eded9df87ad59cbd064fdf5f44f708/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-23_18:09:14_656e17f6f7eded9df87ad59cbd064fdf5f44f708/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index b1d19f880f83776795172460b403575e9f2e729d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_18:09:14_656e17f6f7eded9df87ad59cbd064fdf5f44f708/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.00134399999996,0.00331,302.0 diff --git a/raw_results/2023-08-23_18:09:14_656e17f6f7eded9df87ad59cbd064fdf5f44f708/pytorch_bert_inference/0/main.log b/raw_results/2023-08-23_18:09:14_656e17f6f7eded9df87ad59cbd064fdf5f44f708/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
82ab5934b29cf07ac8bfe1ad407f2d5bdd8fd0db..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_18:09:14_656e17f6f7eded9df87ad59cbd064fdf5f44f708/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-23 18:49:44,985][benchmark][INFO] - Configuring inference benchmark -[2023-08-23 18:49:44,986][benchmark][INFO] - + Setting seed(42) -[2023-08-23 18:49:46,326][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-23 18:49:46,326][backend][INFO] - Configuring pytorch backend -[2023-08-23 18:49:46,326][backend][INFO] - + Checking initial device isolation -[2023-08-23 18:49:46,327][backend][INFO] - + Checking contineous device isolation -[2023-08-23 18:49:46,327][pytorch][INFO] - + Disabling gradients -[2023-08-23 18:49:46,327][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-23 18:49:46,942][pytorch][INFO] - + Turning on eval mode -[2023-08-23 18:49:46,942][inference][INFO] - Running inference benchmark -[2023-08-23 18:49:47,057][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-23 18:49:47,059][inference][INFO] - + Tracking forward pass peak memory -[2023-08-23 18:49:47,122][inference][INFO] - + Forward pass peak memory: 467.00134399999996 (MB) -[2023-08-23 18:49:47,124][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-23 18:49:47,125][inference][INFO] - + Warming up the forward pass -[2023-08-23 18:49:47,157][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-23 18:49:52,206][inference][INFO] - + Forward pass latency: 3.31e-03 (s) -[2023-08-23 18:49:52,208][inference][INFO] - + Forward pass throughput: 302.00 (samples/s) -[2023-08-23 18:49:52,208][inference][INFO] - Saving inference results -[2023-08-23 18:49:52,219][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-23_18:09:14_656e17f6f7eded9df87ad59cbd064fdf5f44f708/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-23_18:09:14_656e17f6f7eded9df87ad59cbd064fdf5f44f708/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_18:09:14_656e17f6f7eded9df87ad59cbd064fdf5f44f708/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_18:09:14_656e17f6f7eded9df87ad59cbd064fdf5f44f708/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-23_18:09:14_656e17f6f7eded9df87ad59cbd064fdf5f44f708/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 125d7333c06a795776a9fa9d674e8fb5f7ad2b5e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_18:09:14_656e17f6f7eded9df87ad59cbd064fdf5f44f708/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-23_18:09:14_656e17f6f7eded9df87ad59cbd064fdf5f44f708/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-23_18:09:14_656e17f6f7eded9df87ad59cbd064fdf5f44f708/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-23_18:09:14_656e17f6f7eded9df87ad59cbd064fdf5f44f708/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_18:09:14_656e17f6f7eded9df87ad59cbd064fdf5f44f708/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-23_18:09:14_656e17f6f7eded9df87ad59cbd064fdf5f44f708/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-23_18:09:14_656e17f6f7eded9df87ad59cbd064fdf5f44f708/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_18:09:14_656e17f6f7eded9df87ad59cbd064fdf5f44f708/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_18:09:14_656e17f6f7eded9df87ad59cbd064fdf5f44f708/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-23_18:09:14_656e17f6f7eded9df87ad59cbd064fdf5f44f708/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 1439c3641973b8a23fc7f7df61e50ebd1bf6279c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_18:09:14_656e17f6f7eded9df87ad59cbd064fdf5f44f708/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.0704,0.00342,1170.0 diff --git a/raw_results/2023-08-23_18:09:14_656e17f6f7eded9df87ad59cbd064fdf5f44f708/pytorch_bert_inference/1/main.log b/raw_results/2023-08-23_18:09:14_656e17f6f7eded9df87ad59cbd064fdf5f44f708/pytorch_bert_inference/1/main.log deleted file mode 100644 index bcf7f561b58b67b0ae68fd3cdd26346ff57008e1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_18:09:14_656e17f6f7eded9df87ad59cbd064fdf5f44f708/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-23 18:49:52,616][benchmark][INFO] - Configuring inference benchmark -[2023-08-23 18:49:52,617][benchmark][INFO] - + Setting seed(42) -[2023-08-23 18:49:53,091][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-23 18:49:53,092][backend][INFO] - Configuring pytorch backend -[2023-08-23 18:49:53,092][backend][INFO] - + Checking initial device isolation -[2023-08-23 18:49:53,092][backend][INFO] - + Checking contineous device isolation -[2023-08-23 18:49:53,092][pytorch][INFO] - + Disabling gradients -[2023-08-23 18:49:53,092][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-23 18:49:53,208][pytorch][INFO] - + Turning on eval mode -[2023-08-23 18:49:53,209][inference][INFO] - Running inference benchmark -[2023-08-23 18:49:53,331][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-23 18:49:53,332][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-23 18:49:53,372][inference][INFO] - + Forward pass peak memory: 468.0704 (MB) -[2023-08-23 18:49:53,373][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-23 18:49:53,375][inference][INFO] - + Warming up the forward pass -[2023-08-23 18:49:53,409][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-23 18:49:58,456][inference][INFO] - + Forward pass latency: 3.42e-03 (s) -[2023-08-23 18:49:58,457][inference][INFO] - + Forward pass throughput: 1170.00 (samples/s) -[2023-08-23 18:49:58,457][inference][INFO] - Saving inference results -[2023-08-23 18:49:58,465][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-23_18:09:14_656e17f6f7eded9df87ad59cbd064fdf5f44f708/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-23_18:09:14_656e17f6f7eded9df87ad59cbd064fdf5f44f708/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_18:09:14_656e17f6f7eded9df87ad59cbd064fdf5f44f708/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_18:09:14_656e17f6f7eded9df87ad59cbd064fdf5f44f708/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-23_18:09:14_656e17f6f7eded9df87ad59cbd064fdf5f44f708/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index d5aeb98f2b90a1bf175a4776235a6f2a3d23fd35..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_18:09:14_656e17f6f7eded9df87ad59cbd064fdf5f44f708/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-23_18:09:14_656e17f6f7eded9df87ad59cbd064fdf5f44f708/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-23_18:09:14_656e17f6f7eded9df87ad59cbd064fdf5f44f708/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-23_18:09:14_656e17f6f7eded9df87ad59cbd064fdf5f44f708/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_18:09:14_656e17f6f7eded9df87ad59cbd064fdf5f44f708/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-23_18:09:14_656e17f6f7eded9df87ad59cbd064fdf5f44f708/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-23_18:09:14_656e17f6f7eded9df87ad59cbd064fdf5f44f708/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_18:09:14_656e17f6f7eded9df87ad59cbd064fdf5f44f708/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_18:09:14_656e17f6f7eded9df87ad59cbd064fdf5f44f708/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-23_18:09:14_656e17f6f7eded9df87ad59cbd064fdf5f44f708/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 07d17f30df8f1abc96ee16892acf242a586f5e5b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_18:09:14_656e17f6f7eded9df87ad59cbd064fdf5f44f708/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,468.668416,0.00391,256.0,0.531,188.0 diff --git a/raw_results/2023-08-23_18:09:14_656e17f6f7eded9df87ad59cbd064fdf5f44f708/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-23_18:09:14_656e17f6f7eded9df87ad59cbd064fdf5f44f708/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index f53cd829406c2eff9727829b0984fa1f6cb8bc69..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-23_18:09:14_656e17f6f7eded9df87ad59cbd064fdf5f44f708/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-23 18:50:03,382][benchmark][INFO] - Configuring inference benchmark -[2023-08-23 18:50:03,383][benchmark][INFO] - + Setting seed(42) -[2023-08-23 18:50:05,171][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-23 18:50:05,172][backend][INFO] - Configuring pytorch backend -[2023-08-23 18:50:05,172][backend][INFO] - + Checking initial device isolation -[2023-08-23 18:50:05,173][backend][INFO] - + Checking continuous device isolation -[2023-08-23 18:50:05,173][pytorch][INFO] - + Disabling gradients -[2023-08-23 18:50:05,173][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-23 18:50:05,828][pytorch][INFO] - + Turning on eval mode -[2023-08-23 18:50:05,828][inference][INFO] - Running inference benchmark -[2023-08-23 18:50:06,023][inference][INFO] - + Tracking forward pass peak memory -[2023-08-23 18:50:06,073][inference][INFO] - + Forward pass peak memory: 468.668416 (MB) -[2023-08-23 18:50:06,075][inference][INFO] - + Warming up the forward pass -[2023-08-23 18:50:06,109][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-23 18:50:11,155][inference][INFO] - + Forward pass latency: 3.91e-03 (s) -[2023-08-23 18:50:11,157][inference][INFO] - + Forward pass throughput: 256.00 (samples/s) -[2023-08-23 18:50:11,157][inference][INFO] - + Warming up the generation pass -[2023-08-23 18:50:11,652][inference][INFO] - + Tracking generation latency and throughput -[2023-08-23 18:50:16,967][inference][INFO] - + Generation pass latency: 5.31e-01 (s) -[2023-08-23 18:50:16,967][inference][INFO] - + Generation pass throughput: 188.00 (tokens/s) -[2023-08-23 18:50:16,967][inference][INFO] - Saving inference results -[2023-08-23 18:50:16,980][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-23_18:17:01_3c2383b1c6eb860c0511d081e670d1782cd66b8d/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-23_18:17:01_3c2383b1c6eb860c0511d081e670d1782cd66b8d/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_18:17:01_3c2383b1c6eb860c0511d081e670d1782cd66b8d/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 -
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_18:17:01_3c2383b1c6eb860c0511d081e670d1782cd66b8d/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-23_18:17:01_3c2383b1c6eb860c0511d081e670d1782cd66b8d/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 4ad09c13e4f3bd3e8d2277165f90c93f1fe895fc..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_18:17:01_3c2383b1c6eb860c0511d081e670d1782cd66b8d/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-23_18:17:01_3c2383b1c6eb860c0511d081e670d1782cd66b8d/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-23_18:17:01_3c2383b1c6eb860c0511d081e670d1782cd66b8d/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-23_18:17:01_3c2383b1c6eb860c0511d081e670d1782cd66b8d/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_18:17:01_3c2383b1c6eb860c0511d081e670d1782cd66b8d/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-23_18:17:01_3c2383b1c6eb860c0511d081e670d1782cd66b8d/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-23_18:17:01_3c2383b1c6eb860c0511d081e670d1782cd66b8d/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_18:17:01_3c2383b1c6eb860c0511d081e670d1782cd66b8d/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_18:17:01_3c2383b1c6eb860c0511d081e670d1782cd66b8d/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-23_18:17:01_3c2383b1c6eb860c0511d081e670d1782cd66b8d/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 515a8a6b76de9748bd2992b32e0fc5254ef1f891..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_18:17:01_3c2383b1c6eb860c0511d081e670d1782cd66b8d/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.132416,0.00307,326.0 diff --git a/raw_results/2023-08-23_18:17:01_3c2383b1c6eb860c0511d081e670d1782cd66b8d/pytorch_bert_inference/0/main.log b/raw_results/2023-08-23_18:17:01_3c2383b1c6eb860c0511d081e670d1782cd66b8d/pytorch_bert_inference/0/main.log deleted file mode 100644 index ed530d2e4fa9d59e38533d775b74c0dbbccf3099..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_18:17:01_3c2383b1c6eb860c0511d081e670d1782cd66b8d/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-23 18:51:24,088][benchmark][INFO] - Configuring inference benchmark -[2023-08-23 18:51:24,089][benchmark][INFO] - + Setting seed(42) -[2023-08-23 18:51:25,518][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-23 18:51:25,518][backend][INFO] - Configuring pytorch backend -[2023-08-23 18:51:25,518][backend][INFO] - + Checking initial device isolation -[2023-08-23 18:51:25,518][backend][INFO] - + Checking continuous device isolation -[2023-08-23 18:51:25,518][pytorch][INFO] - + Disabling gradients -[2023-08-23 18:51:25,519][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-23 18:51:26,137][pytorch][INFO] - + Turning on eval mode -[2023-08-23 18:51:26,138][inference][INFO] - Running inference benchmark -[2023-08-23 18:51:26,260][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-23 18:51:26,261][inference][INFO] - + Tracking forward pass peak
memory -[2023-08-23 18:51:26,322][inference][INFO] - + Forward pass peak memory: 467.132416 (MB) -[2023-08-23 18:51:26,323][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-23 18:51:26,325][inference][INFO] - + Warming up the forward pass -[2023-08-23 18:51:26,363][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-23 18:51:31,418][inference][INFO] - + Forward pass latency: 3.07e-03 (s) -[2023-08-23 18:51:31,420][inference][INFO] - + Forward pass throughput: 326.00 (samples/s) -[2023-08-23 18:51:31,420][inference][INFO] - Saving inference results -[2023-08-23 18:51:31,433][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-23_18:17:01_3c2383b1c6eb860c0511d081e670d1782cd66b8d/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-23_18:17:01_3c2383b1c6eb860c0511d081e670d1782cd66b8d/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_18:17:01_3c2383b1c6eb860c0511d081e670d1782cd66b8d/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_18:17:01_3c2383b1c6eb860c0511d081e670d1782cd66b8d/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-23_18:17:01_3c2383b1c6eb860c0511d081e670d1782cd66b8d/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 8ccae6291b64422085485b7d71442ee528392c17..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_18:17:01_3c2383b1c6eb860c0511d081e670d1782cd66b8d/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-23_18:17:01_3c2383b1c6eb860c0511d081e670d1782cd66b8d/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-23_18:17:01_3c2383b1c6eb860c0511d081e670d1782cd66b8d/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-08-23_18:17:01_3c2383b1c6eb860c0511d081e670d1782cd66b8d/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_18:17:01_3c2383b1c6eb860c0511d081e670d1782cd66b8d/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-23_18:17:01_3c2383b1c6eb860c0511d081e670d1782cd66b8d/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-23_18:17:01_3c2383b1c6eb860c0511d081e670d1782cd66b8d/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_18:17:01_3c2383b1c6eb860c0511d081e670d1782cd66b8d/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_18:17:01_3c2383b1c6eb860c0511d081e670d1782cd66b8d/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-23_18:17:01_3c2383b1c6eb860c0511d081e670d1782cd66b8d/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index c4163994e0f09be49ebcb88deeb918e19e1a9403..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_18:17:01_3c2383b1c6eb860c0511d081e670d1782cd66b8d/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.24652799999996,0.0034,1180.0 diff --git a/raw_results/2023-08-23_18:17:01_3c2383b1c6eb860c0511d081e670d1782cd66b8d/pytorch_bert_inference/1/main.log b/raw_results/2023-08-23_18:17:01_3c2383b1c6eb860c0511d081e670d1782cd66b8d/pytorch_bert_inference/1/main.log deleted file mode 100644 index 27fe2310764ad67f9107834807d109f2084c900a..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-23_18:17:01_3c2383b1c6eb860c0511d081e670d1782cd66b8d/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-23 18:51:31,828][benchmark][INFO] - Configuring inference benchmark -[2023-08-23 18:51:31,829][benchmark][INFO] - + Setting seed(42) -[2023-08-23 18:51:32,314][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-23 18:51:32,315][backend][INFO] - Configuring pytorch backend -[2023-08-23 18:51:32,315][backend][INFO] - + Checking initial device isolation -[2023-08-23 18:51:32,315][backend][INFO] - + Checking continuous device isolation -[2023-08-23 18:51:32,315][pytorch][INFO] - + Disabling gradients -[2023-08-23 18:51:32,315][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-23 18:51:32,430][pytorch][INFO] - + Turning on eval mode -[2023-08-23 18:51:32,431][inference][INFO] - Running inference benchmark -[2023-08-23 18:51:32,553][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-23 18:51:32,554][inference][INFO] - + Tracking forward pass peak memory -[2023-08-23 18:51:32,597][inference][INFO] - + Forward pass peak memory: 468.24652799999996 (MB) -[2023-08-23 18:51:32,598][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-23 18:51:32,600][inference][INFO] - + Warming up the forward pass -[2023-08-23 18:51:32,636][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-23 18:51:37,684][inference][INFO] - + Forward pass latency: 3.40e-03 (s) -[2023-08-23 18:51:37,686][inference][INFO] - + Forward pass throughput: 1180.00 (samples/s) -[2023-08-23 18:51:37,686][inference][INFO] - Saving inference results -[2023-08-23 18:51:37,693][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-23_18:17:01_3c2383b1c6eb860c0511d081e670d1782cd66b8d/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-23_18:17:01_3c2383b1c6eb860c0511d081e670d1782cd66b8d/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_18:17:01_3c2383b1c6eb860c0511d081e670d1782cd66b8d/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference
-model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_18:17:01_3c2383b1c6eb860c0511d081e670d1782cd66b8d/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-23_18:17:01_3c2383b1c6eb860c0511d081e670d1782cd66b8d/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 621deefd7c0193a0ccb78cbc63fa232ba019bf9f..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_18:17:01_3c2383b1c6eb860c0511d081e670d1782cd66b8d/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-23_18:17:01_3c2383b1c6eb860c0511d081e670d1782cd66b8d/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-23_18:17:01_3c2383b1c6eb860c0511d081e670d1782cd66b8d/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-23_18:17:01_3c2383b1c6eb860c0511d081e670d1782cd66b8d/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_18:17:01_3c2383b1c6eb860c0511d081e670d1782cd66b8d/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-23_18:17:01_3c2383b1c6eb860c0511d081e670d1782cd66b8d/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-23_18:17:01_3c2383b1c6eb860c0511d081e670d1782cd66b8d/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_18:17:01_3c2383b1c6eb860c0511d081e670d1782cd66b8d/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_18:17:01_3c2383b1c6eb860c0511d081e670d1782cd66b8d/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-23_18:17:01_3c2383b1c6eb860c0511d081e670d1782cd66b8d/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 8072be5c7fa7c5fad6e6c31312cf0d746c6c75ad..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_18:17:01_3c2383b1c6eb860c0511d081e670d1782cd66b8d/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.352448,0.00385,260.0,0.52,192.0 diff --git a/raw_results/2023-08-23_18:17:01_3c2383b1c6eb860c0511d081e670d1782cd66b8d/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-23_18:17:01_3c2383b1c6eb860c0511d081e670d1782cd66b8d/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 3627b480457578cf353bbf914beda1df51112801..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_18:17:01_3c2383b1c6eb860c0511d081e670d1782cd66b8d/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-23 18:51:42,502][benchmark][INFO] - Configuring inference benchmark -[2023-08-23 18:51:42,503][benchmark][INFO] - + Setting seed(42) -[2023-08-23 18:51:43,957][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-23 18:51:43,958][backend][INFO] - Configuring pytorch backend -[2023-08-23 18:51:43,958][backend][INFO] - + Checking initial device isolation -[2023-08-23 18:51:43,958][backend][INFO] - + Checking continuous device isolation -[2023-08-23 18:51:43,959][pytorch][INFO] - + Disabling gradients -[2023-08-23 18:51:43,959][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-23 18:51:44,603][pytorch][INFO] - + Turning on eval mode -[2023-08-23 18:51:44,603][inference][INFO] - Running inference benchmark -[2023-08-23 18:51:44,816][inference][INFO] - + Tracking forward pass peak memory -[2023-08-23 18:51:44,872][inference][INFO] - + Forward pass peak memory: 469.352448 (MB) -[2023-08-23 18:51:44,873][inference][INFO] - + Warming up the forward pass
-[2023-08-23 18:51:44,907][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-23 18:51:49,951][inference][INFO] - + Forward pass latency: 3.85e-03 (s) -[2023-08-23 18:51:49,952][inference][INFO] - + Forward pass throughput: 260.00 (samples/s) -[2023-08-23 18:51:49,953][inference][INFO] - + Warming up the generation pass -[2023-08-23 18:51:50,478][inference][INFO] - + Tracking generation latency and throughput -[2023-08-23 18:51:55,678][inference][INFO] - + Generation pass latency: 5.20e-01 (s) -[2023-08-23 18:51:55,679][inference][INFO] - + Generation pass throughput: 192.00 (tokens/s) -[2023-08-23 18:51:55,679][inference][INFO] - Saving inference results -[2023-08-23 18:51:55,691][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-23_18:40:03_4d40109c3a93c9b8bbca204cb046ed510f1c72e8/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-23_18:40:03_4d40109c3a93c9b8bbca204cb046ed510f1c72e8/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_18:40:03_4d40109c3a93c9b8bbca204cb046ed510f1c72e8/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_18:40:03_4d40109c3a93c9b8bbca204cb046ed510f1c72e8/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-23_18:40:03_4d40109c3a93c9b8bbca204cb046ed510f1c72e8/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index ccdc66d903b2c0f03e5d13e694cc41b8be8a14bb..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_18:40:03_4d40109c3a93c9b8bbca204cb046ed510f1c72e8/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-23_18:40:03_4d40109c3a93c9b8bbca204cb046ed510f1c72e8/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-23_18:40:03_4d40109c3a93c9b8bbca204cb046ed510f1c72e8/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-23_18:40:03_4d40109c3a93c9b8bbca204cb046ed510f1c72e8/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_18:40:03_4d40109c3a93c9b8bbca204cb046ed510f1c72e8/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-23_18:40:03_4d40109c3a93c9b8bbca204cb046ed510f1c72e8/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-23_18:40:03_4d40109c3a93c9b8bbca204cb046ed510f1c72e8/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_18:40:03_4d40109c3a93c9b8bbca204cb046ed510f1c72e8/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_18:40:03_4d40109c3a93c9b8bbca204cb046ed510f1c72e8/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-23_18:40:03_4d40109c3a93c9b8bbca204cb046ed510f1c72e8/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 04f4a52df40f1bc448ee335fd0690c142ace3a79..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_18:40:03_4d40109c3a93c9b8bbca204cb046ed510f1c72e8/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.80064,0.00363,275.0 diff --git a/raw_results/2023-08-23_18:40:03_4d40109c3a93c9b8bbca204cb046ed510f1c72e8/pytorch_bert_inference/0/main.log b/raw_results/2023-08-23_18:40:03_4d40109c3a93c9b8bbca204cb046ed510f1c72e8/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
1d8e215febed19206cd8f9b6a2464ac873ceccc0..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_18:40:03_4d40109c3a93c9b8bbca204cb046ed510f1c72e8/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-23 18:53:01,805][benchmark][INFO] - Configuring inference benchmark -[2023-08-23 18:53:01,806][benchmark][INFO] - + Setting seed(42) -[2023-08-23 18:53:03,123][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-23 18:53:03,123][backend][INFO] - Configuring pytorch backend -[2023-08-23 18:53:03,123][backend][INFO] - + Checking initial device isolation -[2023-08-23 18:53:03,123][backend][INFO] - + Checking continuous device isolation -[2023-08-23 18:53:03,124][pytorch][INFO] - + Disabling gradients -[2023-08-23 18:53:03,124][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-23 18:53:03,773][pytorch][INFO] - + Turning on eval mode -[2023-08-23 18:53:03,773][inference][INFO] - Running inference benchmark -[2023-08-23 18:53:03,897][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-23 18:53:03,898][inference][INFO] - + Tracking forward pass peak memory -[2023-08-23 18:53:03,964][inference][INFO] - + Forward pass peak memory: 466.80064 (MB) -[2023-08-23 18:53:03,965][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-23 18:53:03,967][inference][INFO] - + Warming up the forward pass -[2023-08-23 18:53:04,003][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-23 18:53:09,052][inference][INFO] - + Forward pass latency: 3.63e-03 (s) -[2023-08-23 18:53:09,054][inference][INFO] - + Forward pass throughput: 275.00 (samples/s) -[2023-08-23 18:53:09,054][inference][INFO] - Saving inference results -[2023-08-23 18:53:09,067][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-23_18:40:03_4d40109c3a93c9b8bbca204cb046ed510f1c72e8/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-23_18:40:03_4d40109c3a93c9b8bbca204cb046ed510f1c72e8/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_18:40:03_4d40109c3a93c9b8bbca204cb046ed510f1c72e8/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 -
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_18:40:03_4d40109c3a93c9b8bbca204cb046ed510f1c72e8/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-23_18:40:03_4d40109c3a93c9b8bbca204cb046ed510f1c72e8/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index ae7697f94cf54ddc629bf96c6e56e2218671f320..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_18:40:03_4d40109c3a93c9b8bbca204cb046ed510f1c72e8/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-23_18:40:03_4d40109c3a93c9b8bbca204cb046ed510f1c72e8/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-23_18:40:03_4d40109c3a93c9b8bbca204cb046ed510f1c72e8/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-23_18:40:03_4d40109c3a93c9b8bbca204cb046ed510f1c72e8/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_18:40:03_4d40109c3a93c9b8bbca204cb046ed510f1c72e8/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-23_18:40:03_4d40109c3a93c9b8bbca204cb046ed510f1c72e8/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-23_18:40:03_4d40109c3a93c9b8bbca204cb046ed510f1c72e8/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_18:40:03_4d40109c3a93c9b8bbca204cb046ed510f1c72e8/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_18:40:03_4d40109c3a93c9b8bbca204cb046ed510f1c72e8/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-23_18:40:03_4d40109c3a93c9b8bbca204cb046ed510f1c72e8/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 04ee0ebfaf15e2b0fc144da464f16d10975d6bb4..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_18:40:03_4d40109c3a93c9b8bbca204cb046ed510f1c72e8/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.828736,0.00419,955.0 diff --git a/raw_results/2023-08-23_18:40:03_4d40109c3a93c9b8bbca204cb046ed510f1c72e8/pytorch_bert_inference/1/main.log b/raw_results/2023-08-23_18:40:03_4d40109c3a93c9b8bbca204cb046ed510f1c72e8/pytorch_bert_inference/1/main.log deleted file mode 100644 index 6dc5da930127764bc38511e54f438acd7b698206..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_18:40:03_4d40109c3a93c9b8bbca204cb046ed510f1c72e8/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-23 18:53:09,450][benchmark][INFO] - Configuring inference benchmark -[2023-08-23 18:53:09,451][benchmark][INFO] - + Setting seed(42) -[2023-08-23 18:53:09,903][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-23 18:53:09,903][backend][INFO] - Configuring pytorch backend -[2023-08-23 18:53:09,903][backend][INFO] - + Checking initial device isolation -[2023-08-23 18:53:09,903][backend][INFO] - + Checking contineous device isolation -[2023-08-23 18:53:09,903][pytorch][INFO] - + Disabling gradients -[2023-08-23 18:53:09,903][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-23 18:53:10,019][pytorch][INFO] - + Turning on eval mode -[2023-08-23 18:53:10,020][inference][INFO] - Running inference benchmark -[2023-08-23 18:53:10,146][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-23 18:53:10,148][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-23 18:53:10,192][inference][INFO] - + Forward pass peak memory: 467.828736 (MB) -[2023-08-23 18:53:10,193][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-23 18:53:10,195][inference][INFO] - + Warming up the forward pass -[2023-08-23 18:53:10,238][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-23 18:53:15,279][inference][INFO] - + Forward pass latency: 4.19e-03 (s) -[2023-08-23 18:53:15,280][inference][INFO] - + Forward pass throughput: 955.00 (samples/s) -[2023-08-23 18:53:15,280][inference][INFO] - Saving inference results -[2023-08-23 18:53:15,289][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-23_18:40:03_4d40109c3a93c9b8bbca204cb046ed510f1c72e8/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-23_18:40:03_4d40109c3a93c9b8bbca204cb046ed510f1c72e8/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_18:40:03_4d40109c3a93c9b8bbca204cb046ed510f1c72e8/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_18:40:03_4d40109c3a93c9b8bbca204cb046ed510f1c72e8/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-23_18:40:03_4d40109c3a93c9b8bbca204cb046ed510f1c72e8/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index c25c21a3f84cda9cab3656f14ef1b31d28b2a779..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_18:40:03_4d40109c3a93c9b8bbca204cb046ed510f1c72e8/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-23_18:40:03_4d40109c3a93c9b8bbca204cb046ed510f1c72e8/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-23_18:40:03_4d40109c3a93c9b8bbca204cb046ed510f1c72e8/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-23_18:40:03_4d40109c3a93c9b8bbca204cb046ed510f1c72e8/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_18:40:03_4d40109c3a93c9b8bbca204cb046ed510f1c72e8/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-23_18:40:03_4d40109c3a93c9b8bbca204cb046ed510f1c72e8/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-23_18:40:03_4d40109c3a93c9b8bbca204cb046ed510f1c72e8/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_18:40:03_4d40109c3a93c9b8bbca204cb046ed510f1c72e8/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-23_18:40:03_4d40109c3a93c9b8bbca204cb046ed510f1c72e8/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-23_18:40:03_4d40109c3a93c9b8bbca204cb046ed510f1c72e8/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index fe01be20da5ff79b4e101de44f444af8726801e0..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-23_18:40:03_4d40109c3a93c9b8bbca204cb046ed510f1c72e8/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.487616,0.00383,261.0,0.527,190.0 diff --git a/raw_results/2023-08-23_18:40:03_4d40109c3a93c9b8bbca204cb046ed510f1c72e8/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-23_18:40:03_4d40109c3a93c9b8bbca204cb046ed510f1c72e8/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 97b77f762bc0fea2bcc8329ef801fa66f4537226..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-23_18:40:03_4d40109c3a93c9b8bbca204cb046ed510f1c72e8/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-23 18:53:20,195][benchmark][INFO] - Configuring inference benchmark -[2023-08-23 18:53:20,196][benchmark][INFO] - + Setting seed(42) -[2023-08-23 18:53:22,004][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-23 18:53:22,004][backend][INFO] - Configuring pytorch backend -[2023-08-23 18:53:22,004][backend][INFO] - + Checking initial device isolation -[2023-08-23 18:53:22,005][backend][INFO] - + Checking contineous device isolation -[2023-08-23 18:53:22,005][pytorch][INFO] - + Disabling gradients -[2023-08-23 18:53:22,005][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-23 18:53:22,944][pytorch][INFO] - + Turning on eval mode -[2023-08-23 18:53:22,945][inference][INFO] - Running inference benchmark -[2023-08-23 18:53:23,144][inference][INFO] - + Tracking forward pass peak memory -[2023-08-23 18:53:23,194][inference][INFO] - + Forward pass peak memory: 469.487616 (MB) -[2023-08-23 18:53:23,196][inference][INFO] - + Warming up the forward pass -[2023-08-23 18:53:23,229][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-23 18:53:28,275][inference][INFO] - + Forward pass latency: 3.83e-03 (s) -[2023-08-23 18:53:28,277][inference][INFO] - + Forward pass throughput: 261.00 (samples/s) -[2023-08-23 18:53:28,278][inference][INFO] - + Warming up the generation pass -[2023-08-23 18:53:28,821][inference][INFO] - + Tracking generation latency and throughput -[2023-08-23 18:53:34,097][inference][INFO] - + Generation pass latency: 5.27e-01 (s) -[2023-08-23 18:53:34,098][inference][INFO] - + Generation pass throughput: 190.00 (tokens/s) -[2023-08-23 18:53:34,098][inference][INFO] - Saving inference results -[2023-08-23 18:53:34,111][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-24_05:48:53_68fa9a5937ae7aa707f5ff2639aa36a37a0a9928/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-24_05:48:53_68fa9a5937ae7aa707f5ff2639aa36a37a0a9928/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_05:48:53_68fa9a5937ae7aa707f5ff2639aa36a37a0a9928/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_05:48:53_68fa9a5937ae7aa707f5ff2639aa36a37a0a9928/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-24_05:48:53_68fa9a5937ae7aa707f5ff2639aa36a37a0a9928/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 10c1a5c02a1ec6afebda3909910315e43df22ae6..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_05:48:53_68fa9a5937ae7aa707f5ff2639aa36a37a0a9928/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-24_05:48:53_68fa9a5937ae7aa707f5ff2639aa36a37a0a9928/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-24_05:48:53_68fa9a5937ae7aa707f5ff2639aa36a37a0a9928/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-24_05:48:53_68fa9a5937ae7aa707f5ff2639aa36a37a0a9928/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_05:48:53_68fa9a5937ae7aa707f5ff2639aa36a37a0a9928/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-24_05:48:53_68fa9a5937ae7aa707f5ff2639aa36a37a0a9928/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-24_05:48:53_68fa9a5937ae7aa707f5ff2639aa36a37a0a9928/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_05:48:53_68fa9a5937ae7aa707f5ff2639aa36a37a0a9928/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_05:48:53_68fa9a5937ae7aa707f5ff2639aa36a37a0a9928/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-24_05:48:53_68fa9a5937ae7aa707f5ff2639aa36a37a0a9928/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index c59320b86c658b1299e331442b0f47f38e5f42ee..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_05:48:53_68fa9a5937ae7aa707f5ff2639aa36a37a0a9928/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.611648,0.00378,265.0 diff --git a/raw_results/2023-08-24_05:48:53_68fa9a5937ae7aa707f5ff2639aa36a37a0a9928/pytorch_bert_inference/0/main.log b/raw_results/2023-08-24_05:48:53_68fa9a5937ae7aa707f5ff2639aa36a37a0a9928/pytorch_bert_inference/0/main.log deleted file mode 100644 index 10496e463f3a87f9ba1d2e21a753f102c90d7c57..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_05:48:53_68fa9a5937ae7aa707f5ff2639aa36a37a0a9928/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-24 06:49:50,146][benchmark][INFO] - Configuring inference benchmark -[2023-08-24 06:49:50,147][benchmark][INFO] - + Setting seed(42) -[2023-08-24 06:49:51,392][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-24 06:49:51,392][backend][INFO] - Configuring pytorch backend -[2023-08-24 06:49:51,393][backend][INFO] - + Checking initial device isolation -[2023-08-24 06:49:51,393][backend][INFO] - + Checking contineous device isolation -[2023-08-24 06:49:51,393][pytorch][INFO] - + Disabling gradients -[2023-08-24 06:49:51,393][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-24 06:49:52,013][pytorch][INFO] - + Turning on eval mode -[2023-08-24 06:49:52,014][inference][INFO] - Running inference benchmark -[2023-08-24 06:49:52,131][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-24 06:49:52,132][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-24 06:49:52,198][inference][INFO] - + Forward pass peak memory: 467.611648 (MB) -[2023-08-24 06:49:52,199][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-24 06:49:52,201][inference][INFO] - + Warming up the forward pass -[2023-08-24 06:49:52,233][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-24 06:49:57,279][inference][INFO] - + Forward pass latency: 3.78e-03 (s) -[2023-08-24 06:49:57,281][inference][INFO] - + Forward pass throughput: 265.00 (samples/s) -[2023-08-24 06:49:57,281][inference][INFO] - Saving inference results -[2023-08-24 06:49:57,290][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-24_05:48:53_68fa9a5937ae7aa707f5ff2639aa36a37a0a9928/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-24_05:48:53_68fa9a5937ae7aa707f5ff2639aa36a37a0a9928/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_05:48:53_68fa9a5937ae7aa707f5ff2639aa36a37a0a9928/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_05:48:53_68fa9a5937ae7aa707f5ff2639aa36a37a0a9928/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-24_05:48:53_68fa9a5937ae7aa707f5ff2639aa36a37a0a9928/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 4198f09041ae54ff81c8fdbf02860a335c67e435..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_05:48:53_68fa9a5937ae7aa707f5ff2639aa36a37a0a9928/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-24_05:48:53_68fa9a5937ae7aa707f5ff2639aa36a37a0a9928/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-24_05:48:53_68fa9a5937ae7aa707f5ff2639aa36a37a0a9928/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-08-24_05:48:53_68fa9a5937ae7aa707f5ff2639aa36a37a0a9928/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_05:48:53_68fa9a5937ae7aa707f5ff2639aa36a37a0a9928/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-24_05:48:53_68fa9a5937ae7aa707f5ff2639aa36a37a0a9928/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-24_05:48:53_68fa9a5937ae7aa707f5ff2639aa36a37a0a9928/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_05:48:53_68fa9a5937ae7aa707f5ff2639aa36a37a0a9928/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_05:48:53_68fa9a5937ae7aa707f5ff2639aa36a37a0a9928/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-24_05:48:53_68fa9a5937ae7aa707f5ff2639aa36a37a0a9928/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index c012473f946355ca39b6c90c8fcecf1e7d2e0e66..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_05:48:53_68fa9a5937ae7aa707f5ff2639aa36a37a0a9928/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.63974399999995,0.00422,948.0 diff --git a/raw_results/2023-08-24_05:48:53_68fa9a5937ae7aa707f5ff2639aa36a37a0a9928/pytorch_bert_inference/1/main.log b/raw_results/2023-08-24_05:48:53_68fa9a5937ae7aa707f5ff2639aa36a37a0a9928/pytorch_bert_inference/1/main.log deleted file mode 100644 index 01fa47118a52bdb646d507de7df02c93ac39abbf..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-24_05:48:53_68fa9a5937ae7aa707f5ff2639aa36a37a0a9928/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-24 06:49:57,674][benchmark][INFO] - Configuring inference benchmark -[2023-08-24 06:49:57,676][benchmark][INFO] - + Setting seed(42) -[2023-08-24 06:49:58,133][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-24 06:49:58,133][backend][INFO] - Configuring pytorch backend -[2023-08-24 06:49:58,133][backend][INFO] - + Checking initial device isolation -[2023-08-24 06:49:58,134][backend][INFO] - + Checking contineous device isolation -[2023-08-24 06:49:58,134][pytorch][INFO] - + Disabling gradients -[2023-08-24 06:49:58,134][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-24 06:49:58,255][pytorch][INFO] - + Turning on eval mode -[2023-08-24 06:49:58,255][inference][INFO] - Running inference benchmark -[2023-08-24 06:49:58,386][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-24 06:49:58,387][inference][INFO] - + Tracking forward pass peak memory -[2023-08-24 06:49:58,432][inference][INFO] - + Forward pass peak memory: 468.63974399999995 (MB) -[2023-08-24 06:49:58,433][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-24 06:49:58,435][inference][INFO] - + Warming up the forward pass -[2023-08-24 06:49:58,485][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-24 06:50:03,525][inference][INFO] - + Forward pass latency: 4.22e-03 (s) -[2023-08-24 06:50:03,526][inference][INFO] - + Forward pass throughput: 948.00 (samples/s) -[2023-08-24 06:50:03,526][inference][INFO] - Saving inference results -[2023-08-24 06:50:03,535][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-24_05:48:53_68fa9a5937ae7aa707f5ff2639aa36a37a0a9928/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-24_05:48:53_68fa9a5937ae7aa707f5ff2639aa36a37a0a9928/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_05:48:53_68fa9a5937ae7aa707f5ff2639aa36a37a0a9928/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference 
-model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_05:48:53_68fa9a5937ae7aa707f5ff2639aa36a37a0a9928/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-24_05:48:53_68fa9a5937ae7aa707f5ff2639aa36a37a0a9928/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 1ef11661feba478b33f9483f2589904e7833a4f6..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_05:48:53_68fa9a5937ae7aa707f5ff2639aa36a37a0a9928/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-24_05:48:53_68fa9a5937ae7aa707f5ff2639aa36a37a0a9928/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-24_05:48:53_68fa9a5937ae7aa707f5ff2639aa36a37a0a9928/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-24_05:48:53_68fa9a5937ae7aa707f5ff2639aa36a37a0a9928/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_05:48:53_68fa9a5937ae7aa707f5ff2639aa36a37a0a9928/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-24_05:48:53_68fa9a5937ae7aa707f5ff2639aa36a37a0a9928/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-24_05:48:53_68fa9a5937ae7aa707f5ff2639aa36a37a0a9928/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_05:48:53_68fa9a5937ae7aa707f5ff2639aa36a37a0a9928/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_05:48:53_68fa9a5937ae7aa707f5ff2639aa36a37a0a9928/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-24_05:48:53_68fa9a5937ae7aa707f5ff2639aa36a37a0a9928/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index c53fdcc070e765223725676f1226ac934330b0fa..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_05:48:53_68fa9a5937ae7aa707f5ff2639aa36a37a0a9928/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,468.93465599999996,0.0031,323.0,0.481,208.0 diff --git a/raw_results/2023-08-24_05:48:53_68fa9a5937ae7aa707f5ff2639aa36a37a0a9928/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-24_05:48:53_68fa9a5937ae7aa707f5ff2639aa36a37a0a9928/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index dd6fbe2f5de9a42144086c634de3440e15c25ddb..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_05:48:53_68fa9a5937ae7aa707f5ff2639aa36a37a0a9928/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-24 06:50:08,353][benchmark][INFO] - Configuring inference benchmark -[2023-08-24 06:50:08,354][benchmark][INFO] - + Setting seed(42) -[2023-08-24 06:50:09,845][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-24 06:50:09,845][backend][INFO] - Configuring pytorch backend -[2023-08-24 06:50:09,846][backend][INFO] - + Checking initial device isolation -[2023-08-24 06:50:09,846][backend][INFO] - + Checking contineous device isolation -[2023-08-24 06:50:09,846][pytorch][INFO] - + Disabling gradients -[2023-08-24 06:50:09,846][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-24 06:50:10,483][pytorch][INFO] - + Turning on eval mode -[2023-08-24 06:50:10,483][inference][INFO] - Running inference benchmark -[2023-08-24 06:50:10,683][inference][INFO] - + Tracking forward pass peak memory -[2023-08-24 06:50:10,732][inference][INFO] - + Forward pass peak memory: 468.93465599999996 (MB) -[2023-08-24 06:50:10,733][inference][INFO] - + Warming up the 
forward pass -[2023-08-24 06:50:10,766][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-24 06:50:15,817][inference][INFO] - + Forward pass latency: 3.10e-03 (s) -[2023-08-24 06:50:15,819][inference][INFO] - + Forward pass throughput: 323.00 (samples/s) -[2023-08-24 06:50:15,820][inference][INFO] - + Warming up the generation pass -[2023-08-24 06:50:16,308][inference][INFO] - + Tracking generation latency and throughput -[2023-08-24 06:50:21,600][inference][INFO] - + Generation pass latency: 4.81e-01 (s) -[2023-08-24 06:50:21,602][inference][INFO] - + Generation pass throughput: 208.00 (tokens/s) -[2023-08-24 06:50:21,602][inference][INFO] - Saving inference results -[2023-08-24 06:50:21,614][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-24_06:00:42_b85b88069a778f0ffbb7a0f6389e18fca9432dcf/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-24_06:00:42_b85b88069a778f0ffbb7a0f6389e18fca9432dcf/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_06:00:42_b85b88069a778f0ffbb7a0f6389e18fca9432dcf/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_06:00:42_b85b88069a778f0ffbb7a0f6389e18fca9432dcf/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-24_06:00:42_b85b88069a778f0ffbb7a0f6389e18fca9432dcf/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index f14e6fb751c8cf7d92cdb65c7634434464833e40..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_06:00:42_b85b88069a778f0ffbb7a0f6389e18fca9432dcf/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-24_06:00:42_b85b88069a778f0ffbb7a0f6389e18fca9432dcf/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-24_06:00:42_b85b88069a778f0ffbb7a0f6389e18fca9432dcf/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-24_06:00:42_b85b88069a778f0ffbb7a0f6389e18fca9432dcf/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_06:00:42_b85b88069a778f0ffbb7a0f6389e18fca9432dcf/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-24_06:00:42_b85b88069a778f0ffbb7a0f6389e18fca9432dcf/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-24_06:00:42_b85b88069a778f0ffbb7a0f6389e18fca9432dcf/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_06:00:42_b85b88069a778f0ffbb7a0f6389e18fca9432dcf/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_06:00:42_b85b88069a778f0ffbb7a0f6389e18fca9432dcf/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-24_06:00:42_b85b88069a778f0ffbb7a0f6389e18fca9432dcf/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 3de9b21e96f532e3c15099b5bbfa744e7e58296a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_06:00:42_b85b88069a778f0ffbb7a0f6389e18fca9432dcf/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.156992,0.00306,327.0 diff --git a/raw_results/2023-08-24_06:00:42_b85b88069a778f0ffbb7a0f6389e18fca9432dcf/pytorch_bert_inference/0/main.log b/raw_results/2023-08-24_06:00:42_b85b88069a778f0ffbb7a0f6389e18fca9432dcf/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
e8faa97e34570ca3ba184712d6035ad69f1d893a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_06:00:42_b85b88069a778f0ffbb7a0f6389e18fca9432dcf/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-24 06:51:28,614][benchmark][INFO] - Configuring inference benchmark -[2023-08-24 06:51:28,615][benchmark][INFO] - + Setting seed(42) -[2023-08-24 06:51:29,870][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-24 06:51:29,870][backend][INFO] - Configuring pytorch backend -[2023-08-24 06:51:29,871][backend][INFO] - + Checking initial device isolation -[2023-08-24 06:51:29,871][backend][INFO] - + Checking continuous device isolation -[2023-08-24 06:51:29,871][pytorch][INFO] - + Disabling gradients -[2023-08-24 06:51:29,871][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-24 06:51:30,485][pytorch][INFO] - + Turning on eval mode -[2023-08-24 06:51:30,486][inference][INFO] - Running inference benchmark -[2023-08-24 06:51:30,601][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-24 06:51:30,602][inference][INFO] - + Tracking forward pass peak memory -[2023-08-24 06:51:30,668][inference][INFO] - + Forward pass peak memory: 467.156992 (MB) -[2023-08-24 06:51:30,669][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-24 06:51:30,671][inference][INFO] - + Warming up the forward pass -[2023-08-24 06:51:30,707][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-24 06:51:35,758][inference][INFO] - + Forward pass latency: 3.06e-03 (s) -[2023-08-24 06:51:35,759][inference][INFO] - + Forward pass throughput: 327.00 (samples/s) -[2023-08-24 06:51:35,759][inference][INFO] - Saving inference results -[2023-08-24 06:51:35,769][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-24_06:00:42_b85b88069a778f0ffbb7a0f6389e18fca9432dcf/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-24_06:00:42_b85b88069a778f0ffbb7a0f6389e18fca9432dcf/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_06:00:42_b85b88069a778f0ffbb7a0f6389e18fca9432dcf/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 -
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_06:00:42_b85b88069a778f0ffbb7a0f6389e18fca9432dcf/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-24_06:00:42_b85b88069a778f0ffbb7a0f6389e18fca9432dcf/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 976dbaa2d06edf16197450c11b9043e3179206c7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_06:00:42_b85b88069a778f0ffbb7a0f6389e18fca9432dcf/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-24_06:00:42_b85b88069a778f0ffbb7a0f6389e18fca9432dcf/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-24_06:00:42_b85b88069a778f0ffbb7a0f6389e18fca9432dcf/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-24_06:00:42_b85b88069a778f0ffbb7a0f6389e18fca9432dcf/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_06:00:42_b85b88069a778f0ffbb7a0f6389e18fca9432dcf/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-24_06:00:42_b85b88069a778f0ffbb7a0f6389e18fca9432dcf/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-24_06:00:42_b85b88069a778f0ffbb7a0f6389e18fca9432dcf/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_06:00:42_b85b88069a778f0ffbb7a0f6389e18fca9432dcf/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_06:00:42_b85b88069a778f0ffbb7a0f6389e18fca9432dcf/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-24_06:00:42_b85b88069a778f0ffbb7a0f6389e18fca9432dcf/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 04c0fc2f9c13401f6d9f78948302c0092b904973..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_06:00:42_b85b88069a778f0ffbb7a0f6389e18fca9432dcf/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.262912,0.00341,1170.0 diff --git a/raw_results/2023-08-24_06:00:42_b85b88069a778f0ffbb7a0f6389e18fca9432dcf/pytorch_bert_inference/1/main.log b/raw_results/2023-08-24_06:00:42_b85b88069a778f0ffbb7a0f6389e18fca9432dcf/pytorch_bert_inference/1/main.log deleted file mode 100644 index 9c63af01f6aebff74b3019b74e98ec32388e8791..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_06:00:42_b85b88069a778f0ffbb7a0f6389e18fca9432dcf/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-24 06:51:36,135][benchmark][INFO] - Configuring inference benchmark -[2023-08-24 06:51:36,136][benchmark][INFO] - + Setting seed(42) -[2023-08-24 06:51:36,565][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-24 06:51:36,565][backend][INFO] - Configuring pytorch backend -[2023-08-24 06:51:36,565][backend][INFO] - + Checking initial device isolation -[2023-08-24 06:51:36,565][backend][INFO] - + Checking continuous device isolation -[2023-08-24 06:51:36,566][pytorch][INFO] - + Disabling gradients -[2023-08-24 06:51:36,566][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-24 06:51:36,740][pytorch][INFO] - + Turning on eval mode -[2023-08-24 06:51:36,741][inference][INFO] - Running inference benchmark -[2023-08-24 06:51:36,859][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-24 06:51:36,860][inference][INFO] - + Tracking forward pass
peak memory -[2023-08-24 06:51:36,904][inference][INFO] - + Forward pass peak memory: 468.262912 (MB) -[2023-08-24 06:51:36,905][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-24 06:51:36,907][inference][INFO] - + Warming up the forward pass -[2023-08-24 06:51:36,946][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-24 06:51:41,991][inference][INFO] - + Forward pass latency: 3.41e-03 (s) -[2023-08-24 06:51:41,995][inference][INFO] - + Forward pass throughput: 1170.00 (samples/s) -[2023-08-24 06:51:41,995][inference][INFO] - Saving inference results -[2023-08-24 06:51:42,004][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-24_06:00:42_b85b88069a778f0ffbb7a0f6389e18fca9432dcf/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-24_06:00:42_b85b88069a778f0ffbb7a0f6389e18fca9432dcf/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_06:00:42_b85b88069a778f0ffbb7a0f6389e18fca9432dcf/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_06:00:42_b85b88069a778f0ffbb7a0f6389e18fca9432dcf/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-24_06:00:42_b85b88069a778f0ffbb7a0f6389e18fca9432dcf/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 8e267c144d8794030db320a5b52fca3ac70f96b8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_06:00:42_b85b88069a778f0ffbb7a0f6389e18fca9432dcf/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-24_06:00:42_b85b88069a778f0ffbb7a0f6389e18fca9432dcf/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-24_06:00:42_b85b88069a778f0ffbb7a0f6389e18fca9432dcf/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-24_06:00:42_b85b88069a778f0ffbb7a0f6389e18fca9432dcf/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_06:00:42_b85b88069a778f0ffbb7a0f6389e18fca9432dcf/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-24_06:00:42_b85b88069a778f0ffbb7a0f6389e18fca9432dcf/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-24_06:00:42_b85b88069a778f0ffbb7a0f6389e18fca9432dcf/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_06:00:42_b85b88069a778f0ffbb7a0f6389e18fca9432dcf/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_06:00:42_b85b88069a778f0ffbb7a0f6389e18fca9432dcf/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-24_06:00:42_b85b88069a778f0ffbb7a0f6389e18fca9432dcf/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 497ed9773394c350a9117c748ad9cf204790b80b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_06:00:42_b85b88069a778f0ffbb7a0f6389e18fca9432dcf/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,468.836352,0.00382,262.0,0.505,198.0 diff --git a/raw_results/2023-08-24_06:00:42_b85b88069a778f0ffbb7a0f6389e18fca9432dcf/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-24_06:00:42_b85b88069a778f0ffbb7a0f6389e18fca9432dcf/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 07f8193c22b37eb20280df8a59b548a675099091..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-24_06:00:42_b85b88069a778f0ffbb7a0f6389e18fca9432dcf/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-24 06:51:46,816][benchmark][INFO] - Configuring inference benchmark -[2023-08-24 06:51:46,817][benchmark][INFO] - + Setting seed(42) -[2023-08-24 06:51:48,727][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-24 06:51:48,727][backend][INFO] - Configuring pytorch backend -[2023-08-24 06:51:48,728][backend][INFO] - + Checking initial device isolation -[2023-08-24 06:51:48,728][backend][INFO] - + Checking continuous device isolation -[2023-08-24 06:51:48,728][pytorch][INFO] - + Disabling gradients -[2023-08-24 06:51:48,728][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-24 06:51:49,368][pytorch][INFO] - + Turning on eval mode -[2023-08-24 06:51:49,368][inference][INFO] - Running inference benchmark -[2023-08-24 06:51:49,570][inference][INFO] - + Tracking forward pass peak memory -[2023-08-24 06:51:49,621][inference][INFO] - + Forward pass peak memory: 468.836352 (MB) -[2023-08-24 06:51:49,623][inference][INFO] - + Warming up the forward pass -[2023-08-24 06:51:49,656][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-24 06:51:54,701][inference][INFO] - + Forward pass latency: 3.82e-03 (s) -[2023-08-24 06:51:54,703][inference][INFO] - + Forward pass throughput: 262.00 (samples/s) -[2023-08-24 06:51:54,704][inference][INFO] - + Warming up the generation pass -[2023-08-24 06:51:55,194][inference][INFO] - + Tracking generation latency and throughput -[2023-08-24 06:52:00,245][inference][INFO] - + Generation pass latency: 5.05e-01 (s) -[2023-08-24 06:52:00,246][inference][INFO] - + Generation pass throughput: 198.00 (tokens/s) -[2023-08-24 06:52:00,247][inference][INFO] - Saving inference results -[2023-08-24 06:52:00,259][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-24_06:03:34_6e6da5e4b860d98d3b625fe5c63db4e83087b6ff/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-24_06:03:34_6e6da5e4b860d98d3b625fe5c63db4e83087b6ff/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_06:03:34_6e6da5e4b860d98d3b625fe5c63db4e83087b6ff/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 -
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_06:03:34_6e6da5e4b860d98d3b625fe5c63db4e83087b6ff/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-24_06:03:34_6e6da5e4b860d98d3b625fe5c63db4e83087b6ff/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 255a1be36768156241114aa8d2ca7346af4b0762..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_06:03:34_6e6da5e4b860d98d3b625fe5c63db4e83087b6ff/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-24_06:03:34_6e6da5e4b860d98d3b625fe5c63db4e83087b6ff/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-24_06:03:34_6e6da5e4b860d98d3b625fe5c63db4e83087b6ff/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-24_06:03:34_6e6da5e4b860d98d3b625fe5c63db4e83087b6ff/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_06:03:34_6e6da5e4b860d98d3b625fe5c63db4e83087b6ff/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-24_06:03:34_6e6da5e4b860d98d3b625fe5c63db4e83087b6ff/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-24_06:03:34_6e6da5e4b860d98d3b625fe5c63db4e83087b6ff/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_06:03:34_6e6da5e4b860d98d3b625fe5c63db4e83087b6ff/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_06:03:34_6e6da5e4b860d98d3b625fe5c63db4e83087b6ff/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-24_06:03:34_6e6da5e4b860d98d3b625fe5c63db4e83087b6ff/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 46390cc75e7646f310d43a95d463521129e5d354..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_06:03:34_6e6da5e4b860d98d3b625fe5c63db4e83087b6ff/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.43552,0.00306,327.0 diff --git a/raw_results/2023-08-24_06:03:34_6e6da5e4b860d98d3b625fe5c63db4e83087b6ff/pytorch_bert_inference/0/main.log b/raw_results/2023-08-24_06:03:34_6e6da5e4b860d98d3b625fe5c63db4e83087b6ff/pytorch_bert_inference/0/main.log deleted file mode 100644 index a2dfcd51d25c0f0f92693b05414dab923847a62c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_06:03:34_6e6da5e4b860d98d3b625fe5c63db4e83087b6ff/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-24 06:53:04,160][benchmark][INFO] - Configuring inference benchmark -[2023-08-24 06:53:04,161][benchmark][INFO] - + Setting seed(42) -[2023-08-24 06:53:05,440][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-24 06:53:05,440][backend][INFO] - Configuring pytorch backend -[2023-08-24 06:53:05,441][backend][INFO] - + Checking initial device isolation -[2023-08-24 06:53:05,441][backend][INFO] - + Checking continuous device isolation -[2023-08-24 06:53:05,441][pytorch][INFO] - + Disabling gradients -[2023-08-24 06:53:05,441][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-24 06:53:06,257][pytorch][INFO] - + Turning on eval mode -[2023-08-24 06:53:06,257][inference][INFO] - Running inference benchmark -[2023-08-24 06:53:06,372][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-24 06:53:06,373][inference][INFO] - + Tracking forward pass peak
memory -[2023-08-24 06:53:06,428][inference][INFO] - + Forward pass peak memory: 467.43552 (MB) -[2023-08-24 06:53:06,429][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-24 06:53:06,430][inference][INFO] - + Warming up the forward pass -[2023-08-24 06:53:06,467][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-24 06:53:11,518][inference][INFO] - + Forward pass latency: 3.06e-03 (s) -[2023-08-24 06:53:11,519][inference][INFO] - + Forward pass throughput: 327.00 (samples/s) -[2023-08-24 06:53:11,519][inference][INFO] - Saving inference results -[2023-08-24 06:53:11,529][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-24_06:03:34_6e6da5e4b860d98d3b625fe5c63db4e83087b6ff/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-24_06:03:34_6e6da5e4b860d98d3b625fe5c63db4e83087b6ff/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_06:03:34_6e6da5e4b860d98d3b625fe5c63db4e83087b6ff/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_06:03:34_6e6da5e4b860d98d3b625fe5c63db4e83087b6ff/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-24_06:03:34_6e6da5e4b860d98d3b625fe5c63db4e83087b6ff/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 88ecd2e8a379621dd2e0974de648108c9d16fab0..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_06:03:34_6e6da5e4b860d98d3b625fe5c63db4e83087b6ff/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-24_06:03:34_6e6da5e4b860d98d3b625fe5c63db4e83087b6ff/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-24_06:03:34_6e6da5e4b860d98d3b625fe5c63db4e83087b6ff/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-08-24_06:03:34_6e6da5e4b860d98d3b625fe5c63db4e83087b6ff/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_06:03:34_6e6da5e4b860d98d3b625fe5c63db4e83087b6ff/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-24_06:03:34_6e6da5e4b860d98d3b625fe5c63db4e83087b6ff/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-24_06:03:34_6e6da5e4b860d98d3b625fe5c63db4e83087b6ff/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_06:03:34_6e6da5e4b860d98d3b625fe5c63db4e83087b6ff/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_06:03:34_6e6da5e4b860d98d3b625fe5c63db4e83087b6ff/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-24_06:03:34_6e6da5e4b860d98d3b625fe5c63db4e83087b6ff/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index c092efdfe9e152793810a5f240852467341d408b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_06:03:34_6e6da5e4b860d98d3b625fe5c63db4e83087b6ff/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.557824,0.0034,1180.0 diff --git a/raw_results/2023-08-24_06:03:34_6e6da5e4b860d98d3b625fe5c63db4e83087b6ff/pytorch_bert_inference/1/main.log b/raw_results/2023-08-24_06:03:34_6e6da5e4b860d98d3b625fe5c63db4e83087b6ff/pytorch_bert_inference/1/main.log deleted file mode 100644 index caaa3456f79e3f18c4a72e2247c6620dada21d7b..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-24_06:03:34_6e6da5e4b860d98d3b625fe5c63db4e83087b6ff/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-24 06:53:11,892][benchmark][INFO] - Configuring inference benchmark -[2023-08-24 06:53:11,893][benchmark][INFO] - + Setting seed(42) -[2023-08-24 06:53:12,354][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-24 06:53:12,354][backend][INFO] - Configuring pytorch backend -[2023-08-24 06:53:12,354][backend][INFO] - + Checking initial device isolation -[2023-08-24 06:53:12,354][backend][INFO] - + Checking continuous device isolation -[2023-08-24 06:53:12,354][pytorch][INFO] - + Disabling gradients -[2023-08-24 06:53:12,354][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-24 06:53:12,470][pytorch][INFO] - + Turning on eval mode -[2023-08-24 06:53:12,471][inference][INFO] - Running inference benchmark -[2023-08-24 06:53:12,596][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-24 06:53:12,598][inference][INFO] - + Tracking forward pass peak memory -[2023-08-24 06:53:12,639][inference][INFO] - + Forward pass peak memory: 468.557824 (MB) -[2023-08-24 06:53:12,640][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-24 06:53:12,642][inference][INFO] - + Warming up the forward pass -[2023-08-24 06:53:12,677][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-24 06:53:17,723][inference][INFO] - + Forward pass latency: 3.40e-03 (s) -[2023-08-24 06:53:17,724][inference][INFO] - + Forward pass throughput: 1180.00 (samples/s) -[2023-08-24 06:53:17,724][inference][INFO] - Saving inference results -[2023-08-24 06:53:17,731][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-24_06:03:34_6e6da5e4b860d98d3b625fe5c63db4e83087b6ff/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-24_06:03:34_6e6da5e4b860d98d3b625fe5c63db4e83087b6ff/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_06:03:34_6e6da5e4b860d98d3b625fe5c63db4e83087b6ff/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model:
hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_06:03:34_6e6da5e4b860d98d3b625fe5c63db4e83087b6ff/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-24_06:03:34_6e6da5e4b860d98d3b625fe5c63db4e83087b6ff/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index cb7c7cb7f6df7c4f85e885f7ff9f10a76b94bf66..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_06:03:34_6e6da5e4b860d98d3b625fe5c63db4e83087b6ff/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-24_06:03:34_6e6da5e4b860d98d3b625fe5c63db4e83087b6ff/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-24_06:03:34_6e6da5e4b860d98d3b625fe5c63db4e83087b6ff/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-24_06:03:34_6e6da5e4b860d98d3b625fe5c63db4e83087b6ff/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_06:03:34_6e6da5e4b860d98d3b625fe5c63db4e83087b6ff/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-24_06:03:34_6e6da5e4b860d98d3b625fe5c63db4e83087b6ff/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-24_06:03:34_6e6da5e4b860d98d3b625fe5c63db4e83087b6ff/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_06:03:34_6e6da5e4b860d98d3b625fe5c63db4e83087b6ff/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_06:03:34_6e6da5e4b860d98d3b625fe5c63db4e83087b6ff/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-24_06:03:34_6e6da5e4b860d98d3b625fe5c63db4e83087b6ff/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 0c8293036485eb4e22003d9e6afd2e176268c9ae..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_06:03:34_6e6da5e4b860d98d3b625fe5c63db4e83087b6ff/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.086208,0.00387,258.0,0.56,179.0 diff --git a/raw_results/2023-08-24_06:03:34_6e6da5e4b860d98d3b625fe5c63db4e83087b6ff/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-24_06:03:34_6e6da5e4b860d98d3b625fe5c63db4e83087b6ff/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 1b0d584b6ccad0f9c12ece10736f9d3566984c7a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_06:03:34_6e6da5e4b860d98d3b625fe5c63db4e83087b6ff/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-24 06:53:22,467][benchmark][INFO] - Configuring inference benchmark -[2023-08-24 06:53:22,468][benchmark][INFO] - + Setting seed(42) -[2023-08-24 06:53:23,909][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-24 06:53:23,910][backend][INFO] - Configuring pytorch backend -[2023-08-24 06:53:23,910][backend][INFO] - + Checking initial device isolation -[2023-08-24 06:53:23,910][backend][INFO] - + Checking contineous device isolation -[2023-08-24 06:53:23,910][pytorch][INFO] - + Disabling gradients -[2023-08-24 06:53:23,910][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-24 06:53:24,549][pytorch][INFO] - + Turning on eval mode -[2023-08-24 06:53:24,549][inference][INFO] - Running inference benchmark -[2023-08-24 06:53:24,752][inference][INFO] - + Tracking forward pass peak memory -[2023-08-24 06:53:24,803][inference][INFO] - + Forward pass peak memory: 469.086208 (MB) -[2023-08-24 06:53:24,805][inference][INFO] - + Warming up the forward pass 
-[2023-08-24 06:53:24,838][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-24 06:53:29,881][inference][INFO] - + Forward pass latency: 3.87e-03 (s) -[2023-08-24 06:53:29,883][inference][INFO] - + Forward pass throughput: 258.00 (samples/s) -[2023-08-24 06:53:29,884][inference][INFO] - + Warming up the generation pass -[2023-08-24 06:53:30,469][inference][INFO] - + Tracking generation latency and throughput -[2023-08-24 06:53:35,510][inference][INFO] - + Generation pass latency: 5.60e-01 (s) -[2023-08-24 06:53:35,510][inference][INFO] - + Generation pass throughput: 179.00 (tokens/s) -[2023-08-24 06:53:35,511][inference][INFO] - Saving inference results -[2023-08-24 06:53:35,523][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-24_08:13:30_c2123626aa3cd6c1ae4869ec9bc8869d1a408166/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-24_08:13:30_c2123626aa3cd6c1ae4869ec9bc8869d1a408166/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_08:13:30_c2123626aa3cd6c1ae4869ec9bc8869d1a408166/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_08:13:30_c2123626aa3cd6c1ae4869ec9bc8869d1a408166/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-24_08:13:30_c2123626aa3cd6c1ae4869ec9bc8869d1a408166/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 66c3378965c1c91d2b0252957cf18dee47bdd503..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_08:13:30_c2123626aa3cd6c1ae4869ec9bc8869d1a408166/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-24_08:13:30_c2123626aa3cd6c1ae4869ec9bc8869d1a408166/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-24_08:13:30_c2123626aa3cd6c1ae4869ec9bc8869d1a408166/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-24_08:13:30_c2123626aa3cd6c1ae4869ec9bc8869d1a408166/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_08:13:30_c2123626aa3cd6c1ae4869ec9bc8869d1a408166/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-24_08:13:30_c2123626aa3cd6c1ae4869ec9bc8869d1a408166/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-24_08:13:30_c2123626aa3cd6c1ae4869ec9bc8869d1a408166/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_08:13:30_c2123626aa3cd6c1ae4869ec9bc8869d1a408166/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_08:13:30_c2123626aa3cd6c1ae4869ec9bc8869d1a408166/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-24_08:13:30_c2123626aa3cd6c1ae4869ec9bc8869d1a408166/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 49ad14aa0980d966f096ebe1b717b72656df0932..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_08:13:30_c2123626aa3cd6c1ae4869ec9bc8869d1a408166/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.06687999999997,0.00327,306.0 diff --git a/raw_results/2023-08-24_08:13:30_c2123626aa3cd6c1ae4869ec9bc8869d1a408166/pytorch_bert_inference/0/main.log b/raw_results/2023-08-24_08:13:30_c2123626aa3cd6c1ae4869ec9bc8869d1a408166/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
1d79268e7f0fd6618b405dbe2748b93dbd16cff5..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_08:13:30_c2123626aa3cd6c1ae4869ec9bc8869d1a408166/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-24 08:50:16,300][benchmark][INFO] - Configuring inference benchmark -[2023-08-24 08:50:16,302][benchmark][INFO] - + Setting seed(42) -[2023-08-24 08:50:17,569][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-24 08:50:17,570][backend][INFO] - Configuring pytorch backend -[2023-08-24 08:50:17,570][backend][INFO] - + Checking initial device isolation -[2023-08-24 08:50:17,570][backend][INFO] - + Checking contineous device isolation -[2023-08-24 08:50:17,570][pytorch][INFO] - + Disabling gradients -[2023-08-24 08:50:17,570][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-24 08:50:18,234][pytorch][INFO] - + Turning on eval mode -[2023-08-24 08:50:18,234][inference][INFO] - Running inference benchmark -[2023-08-24 08:50:18,355][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-24 08:50:18,357][inference][INFO] - + Tracking forward pass peak memory -[2023-08-24 08:50:18,424][inference][INFO] - + Forward pass peak memory: 467.06687999999997 (MB) -[2023-08-24 08:50:18,425][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-24 08:50:18,427][inference][INFO] - + Warming up the forward pass -[2023-08-24 08:50:18,465][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-24 08:50:23,516][inference][INFO] - + Forward pass latency: 3.27e-03 (s) -[2023-08-24 08:50:23,517][inference][INFO] - + Forward pass throughput: 306.00 (samples/s) -[2023-08-24 08:50:23,517][inference][INFO] - Saving inference results -[2023-08-24 08:50:23,528][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-24_08:13:30_c2123626aa3cd6c1ae4869ec9bc8869d1a408166/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-24_08:13:30_c2123626aa3cd6c1ae4869ec9bc8869d1a408166/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_08:13:30_c2123626aa3cd6c1ae4869ec9bc8869d1a408166/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_08:13:30_c2123626aa3cd6c1ae4869ec9bc8869d1a408166/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-24_08:13:30_c2123626aa3cd6c1ae4869ec9bc8869d1a408166/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index ad7aea857ac65c6be1367ac7101d0c6a468a9725..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_08:13:30_c2123626aa3cd6c1ae4869ec9bc8869d1a408166/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-24_08:13:30_c2123626aa3cd6c1ae4869ec9bc8869d1a408166/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-24_08:13:30_c2123626aa3cd6c1ae4869ec9bc8869d1a408166/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-24_08:13:30_c2123626aa3cd6c1ae4869ec9bc8869d1a408166/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_08:13:30_c2123626aa3cd6c1ae4869ec9bc8869d1a408166/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-24_08:13:30_c2123626aa3cd6c1ae4869ec9bc8869d1a408166/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-24_08:13:30_c2123626aa3cd6c1ae4869ec9bc8869d1a408166/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_08:13:30_c2123626aa3cd6c1ae4869ec9bc8869d1a408166/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_08:13:30_c2123626aa3cd6c1ae4869ec9bc8869d1a408166/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-24_08:13:30_c2123626aa3cd6c1ae4869ec9bc8869d1a408166/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 61ccdf75ae0eef20afd7ed1b014616173206d632..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_08:13:30_c2123626aa3cd6c1ae4869ec9bc8869d1a408166/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.15232,0.00433,924.0 diff --git a/raw_results/2023-08-24_08:13:30_c2123626aa3cd6c1ae4869ec9bc8869d1a408166/pytorch_bert_inference/1/main.log b/raw_results/2023-08-24_08:13:30_c2123626aa3cd6c1ae4869ec9bc8869d1a408166/pytorch_bert_inference/1/main.log deleted file mode 100644 index 5a06e14be335595639ceb34f99d8c047092ca629..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_08:13:30_c2123626aa3cd6c1ae4869ec9bc8869d1a408166/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-24 08:50:23,926][benchmark][INFO] - Configuring inference benchmark -[2023-08-24 08:50:23,927][benchmark][INFO] - + Setting seed(42) -[2023-08-24 08:50:24,396][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-24 08:50:24,396][backend][INFO] - Configuring pytorch backend -[2023-08-24 08:50:24,396][backend][INFO] - + Checking initial device isolation -[2023-08-24 08:50:24,397][backend][INFO] - + Checking contineous device isolation -[2023-08-24 08:50:24,397][pytorch][INFO] - + Disabling gradients -[2023-08-24 08:50:24,397][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-24 08:50:24,515][pytorch][INFO] - + Turning on eval mode -[2023-08-24 08:50:24,516][inference][INFO] - Running inference benchmark -[2023-08-24 08:50:24,646][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-24 08:50:24,647][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-24 08:50:24,691][inference][INFO] - + Forward pass peak memory: 468.15232 (MB) -[2023-08-24 08:50:24,692][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-24 08:50:24,694][inference][INFO] - + Warming up the forward pass -[2023-08-24 08:50:24,744][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-24 08:50:29,786][inference][INFO] - + Forward pass latency: 4.33e-03 (s) -[2023-08-24 08:50:29,787][inference][INFO] - + Forward pass throughput: 924.00 (samples/s) -[2023-08-24 08:50:29,787][inference][INFO] - Saving inference results -[2023-08-24 08:50:29,793][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-24_08:13:30_c2123626aa3cd6c1ae4869ec9bc8869d1a408166/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-24_08:13:30_c2123626aa3cd6c1ae4869ec9bc8869d1a408166/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_08:13:30_c2123626aa3cd6c1ae4869ec9bc8869d1a408166/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_08:13:30_c2123626aa3cd6c1ae4869ec9bc8869d1a408166/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-24_08:13:30_c2123626aa3cd6c1ae4869ec9bc8869d1a408166/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 822c03e3a0489d66a64b9085a4cf8ee6fc075e0c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_08:13:30_c2123626aa3cd6c1ae4869ec9bc8869d1a408166/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-24_08:13:30_c2123626aa3cd6c1ae4869ec9bc8869d1a408166/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-24_08:13:30_c2123626aa3cd6c1ae4869ec9bc8869d1a408166/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-24_08:13:30_c2123626aa3cd6c1ae4869ec9bc8869d1a408166/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_08:13:30_c2123626aa3cd6c1ae4869ec9bc8869d1a408166/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-24_08:13:30_c2123626aa3cd6c1ae4869ec9bc8869d1a408166/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-24_08:13:30_c2123626aa3cd6c1ae4869ec9bc8869d1a408166/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_08:13:30_c2123626aa3cd6c1ae4869ec9bc8869d1a408166/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_08:13:30_c2123626aa3cd6c1ae4869ec9bc8869d1a408166/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-24_08:13:30_c2123626aa3cd6c1ae4869ec9bc8869d1a408166/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 9a3ae387f599993a6c6f610c5144bcf4aac669d3..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_08:13:30_c2123626aa3cd6c1ae4869ec9bc8869d1a408166/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.311488,0.00341,293.0,0.482,207.0 diff --git a/raw_results/2023-08-24_08:13:30_c2123626aa3cd6c1ae4869ec9bc8869d1a408166/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-24_08:13:30_c2123626aa3cd6c1ae4869ec9bc8869d1a408166/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index ebc7db253e6e4e56a8246312374f2d8a218f9665..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-24_08:13:30_c2123626aa3cd6c1ae4869ec9bc8869d1a408166/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-24 08:50:34,825][benchmark][INFO] - Configuring inference benchmark -[2023-08-24 08:50:34,826][benchmark][INFO] - + Setting seed(42) -[2023-08-24 08:50:36,247][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-24 08:50:36,247][backend][INFO] - Configuring pytorch backend -[2023-08-24 08:50:36,247][backend][INFO] - + Checking initial device isolation -[2023-08-24 08:50:36,248][backend][INFO] - + Checking contineous device isolation -[2023-08-24 08:50:36,248][pytorch][INFO] - + Disabling gradients -[2023-08-24 08:50:36,248][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-24 08:50:36,902][pytorch][INFO] - + Turning on eval mode -[2023-08-24 08:50:36,902][inference][INFO] - Running inference benchmark -[2023-08-24 08:50:37,110][inference][INFO] - + Tracking forward pass peak memory -[2023-08-24 08:50:37,157][inference][INFO] - + Forward pass peak memory: 469.311488 (MB) -[2023-08-24 08:50:37,159][inference][INFO] - + Warming up the forward pass -[2023-08-24 08:50:37,195][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-24 08:50:42,242][inference][INFO] - + Forward pass latency: 3.41e-03 (s) -[2023-08-24 08:50:42,243][inference][INFO] - + Forward pass throughput: 293.00 (samples/s) -[2023-08-24 08:50:42,244][inference][INFO] - + Warming up the generation pass -[2023-08-24 08:50:42,732][inference][INFO] - + Tracking generation latency and throughput -[2023-08-24 08:50:48,037][inference][INFO] - + Generation pass latency: 4.82e-01 (s) -[2023-08-24 08:50:48,038][inference][INFO] - + Generation pass throughput: 207.00 (tokens/s) -[2023-08-24 08:50:48,038][inference][INFO] - Saving inference results -[2023-08-24 08:50:48,052][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-24_09:14:27_f01459c75db47308698b19b8b1bac1ae1159cd31/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-24_09:14:27_f01459c75db47308698b19b8b1bac1ae1159cd31/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_09:14:27_f01459c75db47308698b19b8b1bac1ae1159cd31/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_09:14:27_f01459c75db47308698b19b8b1bac1ae1159cd31/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-24_09:14:27_f01459c75db47308698b19b8b1bac1ae1159cd31/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index e4c914d478cdac7bd7815886cccd3ab052e0ba79..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_09:14:27_f01459c75db47308698b19b8b1bac1ae1159cd31/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-24_09:14:27_f01459c75db47308698b19b8b1bac1ae1159cd31/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-24_09:14:27_f01459c75db47308698b19b8b1bac1ae1159cd31/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-24_09:14:27_f01459c75db47308698b19b8b1bac1ae1159cd31/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_09:14:27_f01459c75db47308698b19b8b1bac1ae1159cd31/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-24_09:14:27_f01459c75db47308698b19b8b1bac1ae1159cd31/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-24_09:14:27_f01459c75db47308698b19b8b1bac1ae1159cd31/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_09:14:27_f01459c75db47308698b19b8b1bac1ae1159cd31/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_09:14:27_f01459c75db47308698b19b8b1bac1ae1159cd31/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-24_09:14:27_f01459c75db47308698b19b8b1bac1ae1159cd31/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 55f2bd0fcc3c86c83ffd929e1c1af9c0be904484..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_09:14:27_f01459c75db47308698b19b8b1bac1ae1159cd31/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.898944,0.00367,272.0 diff --git a/raw_results/2023-08-24_09:14:27_f01459c75db47308698b19b8b1bac1ae1159cd31/pytorch_bert_inference/0/main.log b/raw_results/2023-08-24_09:14:27_f01459c75db47308698b19b8b1bac1ae1159cd31/pytorch_bert_inference/0/main.log deleted file mode 100644 index 8d835829ceb6b529cd7d345e107cf42347695671..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_09:14:27_f01459c75db47308698b19b8b1bac1ae1159cd31/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-24 10:50:12,464][benchmark][INFO] - Configuring inference benchmark -[2023-08-24 10:50:12,465][benchmark][INFO] - + Setting seed(42) -[2023-08-24 10:50:13,777][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-24 10:50:13,777][backend][INFO] - Configuring pytorch backend -[2023-08-24 10:50:13,778][backend][INFO] - + Checking initial device isolation -[2023-08-24 10:50:13,778][backend][INFO] - + Checking contineous device isolation -[2023-08-24 10:50:13,778][pytorch][INFO] - + Disabling gradients -[2023-08-24 10:50:13,778][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-24 10:50:14,394][pytorch][INFO] - + Turning on eval mode -[2023-08-24 10:50:14,394][inference][INFO] - Running inference benchmark -[2023-08-24 10:50:14,510][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-24 10:50:14,511][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-24 10:50:14,576][inference][INFO] - + Forward pass peak memory: 466.898944 (MB) -[2023-08-24 10:50:14,578][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-24 10:50:14,579][inference][INFO] - + Warming up the forward pass -[2023-08-24 10:50:14,617][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-24 10:50:19,664][inference][INFO] - + Forward pass latency: 3.67e-03 (s) -[2023-08-24 10:50:19,665][inference][INFO] - + Forward pass throughput: 272.00 (samples/s) -[2023-08-24 10:50:19,665][inference][INFO] - Saving inference results -[2023-08-24 10:50:19,675][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-24_09:14:27_f01459c75db47308698b19b8b1bac1ae1159cd31/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-24_09:14:27_f01459c75db47308698b19b8b1bac1ae1159cd31/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_09:14:27_f01459c75db47308698b19b8b1bac1ae1159cd31/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_09:14:27_f01459c75db47308698b19b8b1bac1ae1159cd31/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-24_09:14:27_f01459c75db47308698b19b8b1bac1ae1159cd31/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index a7fcaecdbd9da7b4b35715f2fe06bf1058cd8a0a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_09:14:27_f01459c75db47308698b19b8b1bac1ae1159cd31/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-24_09:14:27_f01459c75db47308698b19b8b1bac1ae1159cd31/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-24_09:14:27_f01459c75db47308698b19b8b1bac1ae1159cd31/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-08-24_09:14:27_f01459c75db47308698b19b8b1bac1ae1159cd31/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_09:14:27_f01459c75db47308698b19b8b1bac1ae1159cd31/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-24_09:14:27_f01459c75db47308698b19b8b1bac1ae1159cd31/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-24_09:14:27_f01459c75db47308698b19b8b1bac1ae1159cd31/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_09:14:27_f01459c75db47308698b19b8b1bac1ae1159cd31/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_09:14:27_f01459c75db47308698b19b8b1bac1ae1159cd31/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-24_09:14:27_f01459c75db47308698b19b8b1bac1ae1159cd31/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 8c71b3f1edf9f89796ed8301c509adae1326b1f7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_09:14:27_f01459c75db47308698b19b8b1bac1ae1159cd31/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.943424,0.0041,976.0 diff --git a/raw_results/2023-08-24_09:14:27_f01459c75db47308698b19b8b1bac1ae1159cd31/pytorch_bert_inference/1/main.log b/raw_results/2023-08-24_09:14:27_f01459c75db47308698b19b8b1bac1ae1159cd31/pytorch_bert_inference/1/main.log deleted file mode 100644 index bb4397f3133c3e3c5e5380794462c7db67083196..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-24_09:14:27_f01459c75db47308698b19b8b1bac1ae1159cd31/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-24 10:50:20,048][benchmark][INFO] - Configuring inference benchmark -[2023-08-24 10:50:20,049][benchmark][INFO] - + Setting seed(42) -[2023-08-24 10:50:20,490][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-24 10:50:20,490][backend][INFO] - Configuring pytorch backend -[2023-08-24 10:50:20,491][backend][INFO] - + Checking initial device isolation -[2023-08-24 10:50:20,491][backend][INFO] - + Checking continuous device isolation -[2023-08-24 10:50:20,491][pytorch][INFO] - + Disabling gradients -[2023-08-24 10:50:20,491][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-24 10:50:20,611][pytorch][INFO] - + Turning on eval mode -[2023-08-24 10:50:20,612][inference][INFO] - Running inference benchmark -[2023-08-24 10:50:20,731][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-24 10:50:20,732][inference][INFO] - + Tracking forward pass peak memory -[2023-08-24 10:50:20,776][inference][INFO] - + Forward pass peak memory: 467.943424 (MB) -[2023-08-24 10:50:20,777][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-24 10:50:20,779][inference][INFO] - + Warming up the forward pass -[2023-08-24 10:50:20,820][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-24 10:50:25,864][inference][INFO] - + Forward pass latency: 4.10e-03 (s) -[2023-08-24 10:50:25,865][inference][INFO] - + Forward pass throughput: 976.00 (samples/s) -[2023-08-24 10:50:25,865][inference][INFO] - Saving inference results -[2023-08-24 10:50:25,872][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-24_09:14:27_f01459c75db47308698b19b8b1bac1ae1159cd31/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-24_09:14:27_f01459c75db47308698b19b8b1bac1ae1159cd31/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_09:14:27_f01459c75db47308698b19b8b1bac1ae1159cd31/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: 
hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_09:14:27_f01459c75db47308698b19b8b1bac1ae1159cd31/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-24_09:14:27_f01459c75db47308698b19b8b1bac1ae1159cd31/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index b78d4c87d1ab0ba105798bceac9db1985f2e8f6e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_09:14:27_f01459c75db47308698b19b8b1bac1ae1159cd31/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-24_09:14:27_f01459c75db47308698b19b8b1bac1ae1159cd31/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-24_09:14:27_f01459c75db47308698b19b8b1bac1ae1159cd31/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-24_09:14:27_f01459c75db47308698b19b8b1bac1ae1159cd31/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_09:14:27_f01459c75db47308698b19b8b1bac1ae1159cd31/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-24_09:14:27_f01459c75db47308698b19b8b1bac1ae1159cd31/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-24_09:14:27_f01459c75db47308698b19b8b1bac1ae1159cd31/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_09:14:27_f01459c75db47308698b19b8b1bac1ae1159cd31/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_09:14:27_f01459c75db47308698b19b8b1bac1ae1159cd31/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-24_09:14:27_f01459c75db47308698b19b8b1bac1ae1159cd31/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 89632e70d6763e34dbbe8626f08ddf914e3d8343..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_09:14:27_f01459c75db47308698b19b8b1bac1ae1159cd31/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.46304,0.00374,267.0,0.517,193.0 diff --git a/raw_results/2023-08-24_09:14:27_f01459c75db47308698b19b8b1bac1ae1159cd31/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-24_09:14:27_f01459c75db47308698b19b8b1bac1ae1159cd31/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 9fb69d4bf7584ed68d815b5d320caf5a63bb13e7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_09:14:27_f01459c75db47308698b19b8b1bac1ae1159cd31/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-24 10:50:30,672][benchmark][INFO] - Configuring inference benchmark -[2023-08-24 10:50:30,672][benchmark][INFO] - + Setting seed(42) -[2023-08-24 10:50:32,195][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-24 10:50:32,195][backend][INFO] - Configuring pytorch backend -[2023-08-24 10:50:32,195][backend][INFO] - + Checking initial device isolation -[2023-08-24 10:50:32,196][backend][INFO] - + Checking continuous device isolation -[2023-08-24 10:50:32,196][pytorch][INFO] - + Disabling gradients -[2023-08-24 10:50:32,196][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-24 10:50:32,997][pytorch][INFO] - + Turning on eval mode -[2023-08-24 10:50:32,997][inference][INFO] - Running inference benchmark -[2023-08-24 10:50:33,202][inference][INFO] - + Tracking forward pass peak memory -[2023-08-24 10:50:33,249][inference][INFO] - + Forward pass peak memory: 469.46304 (MB) -[2023-08-24 10:50:33,250][inference][INFO] - + Warming up the forward pass
-[2023-08-24 10:50:33,281][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-24 10:50:38,324][inference][INFO] - + Forward pass latency: 3.74e-03 (s) -[2023-08-24 10:50:38,325][inference][INFO] - + Forward pass throughput: 267.00 (samples/s) -[2023-08-24 10:50:38,326][inference][INFO] - + Warming up the generation pass -[2023-08-24 10:50:38,910][inference][INFO] - + Tracking generation latency and throughput -[2023-08-24 10:50:44,083][inference][INFO] - + Generation pass latency: 5.17e-01 (s) -[2023-08-24 10:50:44,084][inference][INFO] - + Generation pass throughput: 193.00 (tokens/s) -[2023-08-24 10:50:44,084][inference][INFO] - Saving inference results -[2023-08-24 10:50:44,096][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-24_09:15:29_8fff61b9db86ac3ad92deea48d504b5dafc3b78e/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-24_09:15:29_8fff61b9db86ac3ad92deea48d504b5dafc3b78e/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_09:15:29_8fff61b9db86ac3ad92deea48d504b5dafc3b78e/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_09:15:29_8fff61b9db86ac3ad92deea48d504b5dafc3b78e/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-24_09:15:29_8fff61b9db86ac3ad92deea48d504b5dafc3b78e/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 59d91e753fa5bbfd2251f1393a8f850a0804a212..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_09:15:29_8fff61b9db86ac3ad92deea48d504b5dafc3b78e/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-24_09:15:29_8fff61b9db86ac3ad92deea48d504b5dafc3b78e/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-24_09:15:29_8fff61b9db86ac3ad92deea48d504b5dafc3b78e/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-24_09:15:29_8fff61b9db86ac3ad92deea48d504b5dafc3b78e/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_09:15:29_8fff61b9db86ac3ad92deea48d504b5dafc3b78e/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-24_09:15:29_8fff61b9db86ac3ad92deea48d504b5dafc3b78e/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-24_09:15:29_8fff61b9db86ac3ad92deea48d504b5dafc3b78e/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_09:15:29_8fff61b9db86ac3ad92deea48d504b5dafc3b78e/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_09:15:29_8fff61b9db86ac3ad92deea48d504b5dafc3b78e/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-24_09:15:29_8fff61b9db86ac3ad92deea48d504b5dafc3b78e/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 960dfd722f1ca337589932db4b081829c5ad3766..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_09:15:29_8fff61b9db86ac3ad92deea48d504b5dafc3b78e/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.34950399999997,0.00312,321.0 diff --git a/raw_results/2023-08-24_09:15:29_8fff61b9db86ac3ad92deea48d504b5dafc3b78e/pytorch_bert_inference/0/main.log b/raw_results/2023-08-24_09:15:29_8fff61b9db86ac3ad92deea48d504b5dafc3b78e/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
79e8757f73b3b1d791a354f46a6d4dfe2ddeda92..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_09:15:29_8fff61b9db86ac3ad92deea48d504b5dafc3b78e/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-24 10:51:50,032][benchmark][INFO] - Configuring inference benchmark -[2023-08-24 10:51:50,033][benchmark][INFO] - + Setting seed(42) -[2023-08-24 10:51:51,292][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-24 10:51:51,293][backend][INFO] - Configuring pytorch backend -[2023-08-24 10:51:51,293][backend][INFO] - + Checking initial device isolation -[2023-08-24 10:51:51,293][backend][INFO] - + Checking continuous device isolation -[2023-08-24 10:51:51,293][pytorch][INFO] - + Disabling gradients -[2023-08-24 10:51:51,294][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-24 10:51:51,926][pytorch][INFO] - + Turning on eval mode -[2023-08-24 10:51:51,926][inference][INFO] - Running inference benchmark -[2023-08-24 10:51:52,042][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-24 10:51:52,043][inference][INFO] - + Tracking forward pass peak memory -[2023-08-24 10:51:52,104][inference][INFO] - + Forward pass peak memory: 467.34950399999997 (MB) -[2023-08-24 10:51:52,105][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-24 10:51:52,107][inference][INFO] - + Warming up the forward pass -[2023-08-24 10:51:52,148][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-24 10:51:57,199][inference][INFO] - + Forward pass latency: 3.12e-03 (s) -[2023-08-24 10:51:57,201][inference][INFO] - + Forward pass throughput: 321.00 (samples/s) -[2023-08-24 10:51:57,201][inference][INFO] - Saving inference results -[2023-08-24 10:51:57,211][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-24_09:15:29_8fff61b9db86ac3ad92deea48d504b5dafc3b78e/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-24_09:15:29_8fff61b9db86ac3ad92deea48d504b5dafc3b78e/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_09:15:29_8fff61b9db86ac3ad92deea48d504b5dafc3b78e/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_09:15:29_8fff61b9db86ac3ad92deea48d504b5dafc3b78e/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-24_09:15:29_8fff61b9db86ac3ad92deea48d504b5dafc3b78e/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 4e6b8a6482ebcfec55d7d23077ba9c5fd60c08f8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_09:15:29_8fff61b9db86ac3ad92deea48d504b5dafc3b78e/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-24_09:15:29_8fff61b9db86ac3ad92deea48d504b5dafc3b78e/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-24_09:15:29_8fff61b9db86ac3ad92deea48d504b5dafc3b78e/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-24_09:15:29_8fff61b9db86ac3ad92deea48d504b5dafc3b78e/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_09:15:29_8fff61b9db86ac3ad92deea48d504b5dafc3b78e/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-24_09:15:29_8fff61b9db86ac3ad92deea48d504b5dafc3b78e/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-24_09:15:29_8fff61b9db86ac3ad92deea48d504b5dafc3b78e/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_09:15:29_8fff61b9db86ac3ad92deea48d504b5dafc3b78e/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_09:15:29_8fff61b9db86ac3ad92deea48d504b5dafc3b78e/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-24_09:15:29_8fff61b9db86ac3ad92deea48d504b5dafc3b78e/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 957d9b88c2432cc576f774b6b93282893e7d6b80..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_09:15:29_8fff61b9db86ac3ad92deea48d504b5dafc3b78e/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.455424,0.00344,1160.0 diff --git a/raw_results/2023-08-24_09:15:29_8fff61b9db86ac3ad92deea48d504b5dafc3b78e/pytorch_bert_inference/1/main.log b/raw_results/2023-08-24_09:15:29_8fff61b9db86ac3ad92deea48d504b5dafc3b78e/pytorch_bert_inference/1/main.log deleted file mode 100644 index 67fc58f98b48027ab423f70f59f5261dcbcf46a6..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_09:15:29_8fff61b9db86ac3ad92deea48d504b5dafc3b78e/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-24 10:51:57,601][benchmark][INFO] - Configuring inference benchmark -[2023-08-24 10:51:57,602][benchmark][INFO] - + Setting seed(42) -[2023-08-24 10:51:58,049][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-24 10:51:58,050][backend][INFO] - Configuring pytorch backend -[2023-08-24 10:51:58,050][backend][INFO] - + Checking initial device isolation -[2023-08-24 10:51:58,050][backend][INFO] - + Checking continuous device isolation -[2023-08-24 10:51:58,050][pytorch][INFO] - + Disabling gradients -[2023-08-24 10:51:58,050][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-24 10:51:58,171][pytorch][INFO] - + Turning on eval mode -[2023-08-24 10:51:58,171][inference][INFO] - Running inference benchmark -[2023-08-24 10:51:58,293][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-24 10:51:58,294][inference][INFO] - + Tracking forward pass
peak memory -[2023-08-24 10:51:58,338][inference][INFO] - + Forward pass peak memory: 468.455424 (MB) -[2023-08-24 10:51:58,339][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-24 10:51:58,341][inference][INFO] - + Warming up the forward pass -[2023-08-24 10:51:58,376][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-24 10:52:03,425][inference][INFO] - + Forward pass latency: 3.44e-03 (s) -[2023-08-24 10:52:03,426][inference][INFO] - + Forward pass throughput: 1160.00 (samples/s) -[2023-08-24 10:52:03,426][inference][INFO] - Saving inference results -[2023-08-24 10:52:03,435][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-24_09:15:29_8fff61b9db86ac3ad92deea48d504b5dafc3b78e/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-24_09:15:29_8fff61b9db86ac3ad92deea48d504b5dafc3b78e/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_09:15:29_8fff61b9db86ac3ad92deea48d504b5dafc3b78e/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_09:15:29_8fff61b9db86ac3ad92deea48d504b5dafc3b78e/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-24_09:15:29_8fff61b9db86ac3ad92deea48d504b5dafc3b78e/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index b832aa3a7ca5e5bd2ffaea872eb394d1adf8e437..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_09:15:29_8fff61b9db86ac3ad92deea48d504b5dafc3b78e/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-24_09:15:29_8fff61b9db86ac3ad92deea48d504b5dafc3b78e/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-24_09:15:29_8fff61b9db86ac3ad92deea48d504b5dafc3b78e/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-24_09:15:29_8fff61b9db86ac3ad92deea48d504b5dafc3b78e/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_09:15:29_8fff61b9db86ac3ad92deea48d504b5dafc3b78e/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-24_09:15:29_8fff61b9db86ac3ad92deea48d504b5dafc3b78e/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-24_09:15:29_8fff61b9db86ac3ad92deea48d504b5dafc3b78e/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_09:15:29_8fff61b9db86ac3ad92deea48d504b5dafc3b78e/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_09:15:29_8fff61b9db86ac3ad92deea48d504b5dafc3b78e/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-24_09:15:29_8fff61b9db86ac3ad92deea48d504b5dafc3b78e/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 46dfad66843117387733f89567f77b982c0831a1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_09:15:29_8fff61b9db86ac3ad92deea48d504b5dafc3b78e/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.184512,0.00375,267.0,0.533,188.0 diff --git a/raw_results/2023-08-24_09:15:29_8fff61b9db86ac3ad92deea48d504b5dafc3b78e/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-24_09:15:29_8fff61b9db86ac3ad92deea48d504b5dafc3b78e/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 5e1ab60b7e6d8b6b9dd32936bd78abe4bc78ad39..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-24_09:15:29_8fff61b9db86ac3ad92deea48d504b5dafc3b78e/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-24 10:52:08,244][benchmark][INFO] - Configuring inference benchmark -[2023-08-24 10:52:08,246][benchmark][INFO] - + Setting seed(42) -[2023-08-24 10:52:09,661][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-24 10:52:09,661][backend][INFO] - Configuring pytorch backend -[2023-08-24 10:52:09,661][backend][INFO] - + Checking initial device isolation -[2023-08-24 10:52:09,661][backend][INFO] - + Checking continuous device isolation -[2023-08-24 10:52:09,662][pytorch][INFO] - + Disabling gradients -[2023-08-24 10:52:09,662][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-24 10:52:10,301][pytorch][INFO] - + Turning on eval mode -[2023-08-24 10:52:10,302][inference][INFO] - Running inference benchmark -[2023-08-24 10:52:10,503][inference][INFO] - + Tracking forward pass peak memory -[2023-08-24 10:52:10,550][inference][INFO] - + Forward pass peak memory: 469.184512 (MB) -[2023-08-24 10:52:10,551][inference][INFO] - + Warming up the forward pass -[2023-08-24 10:52:10,587][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-24 10:52:15,631][inference][INFO] - + Forward pass latency: 3.75e-03 (s) -[2023-08-24 10:52:15,633][inference][INFO] - + Forward pass throughput: 267.00 (samples/s) -[2023-08-24 10:52:15,633][inference][INFO] - + Warming up the generation pass -[2023-08-24 10:52:16,128][inference][INFO] - + Tracking generation latency and throughput -[2023-08-24 10:52:21,458][inference][INFO] - + Generation pass latency: 5.33e-01 (s) -[2023-08-24 10:52:21,459][inference][INFO] - + Generation pass throughput: 188.00 (tokens/s) -[2023-08-24 10:52:21,459][inference][INFO] - Saving inference results -[2023-08-24 10:52:21,470][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-24_10:09:12_70b49f023c9f6579c516671604468a491227b4da/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-24_10:09:12_70b49f023c9f6579c516671604468a491227b4da/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_10:09:12_70b49f023c9f6579c516671604468a491227b4da/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_10:09:12_70b49f023c9f6579c516671604468a491227b4da/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-24_10:09:12_70b49f023c9f6579c516671604468a491227b4da/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 3c0299e738e2b55c9340477591e81b1b40a7b90c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_10:09:12_70b49f023c9f6579c516671604468a491227b4da/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
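
Note: the benchmark configs in this diff sweep only the input shapes; the forward.throughput(samples/s) column recorded in the inference_results.csv files is consistent with batch_size divided by forward latency. A minimal arithmetic sketch of that relationship (not part of optimum-benchmark itself; the figures are taken from the bert batch_size=1 results row recorded below):

    # Hypothetical check: forward throughput ~= batch_size / forward latency
    batch_size = 1                 # from benchmark.input_shapes.batch_size
    forward_latency_s = 0.00306    # forward.latency(s) from inference_results.csv
    throughput = batch_size / forward_latency_s
    print(round(throughput))       # ~327, matching forward.throughput(samples/s)
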
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-24_10:09:12_70b49f023c9f6579c516671604468a491227b4da/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-24_10:09:12_70b49f023c9f6579c516671604468a491227b4da/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-24_10:09:12_70b49f023c9f6579c516671604468a491227b4da/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_10:09:12_70b49f023c9f6579c516671604468a491227b4da/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-24_10:09:12_70b49f023c9f6579c516671604468a491227b4da/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-24_10:09:12_70b49f023c9f6579c516671604468a491227b4da/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_10:09:12_70b49f023c9f6579c516671604468a491227b4da/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_10:09:12_70b49f023c9f6579c516671604468a491227b4da/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-24_10:09:12_70b49f023c9f6579c516671604468a491227b4da/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index d26c9aaf5dbe9cbf07b15778ae65519ee5fd8089..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_10:09:12_70b49f023c9f6579c516671604468a491227b4da/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.50515199999995,0.00306,327.0 diff --git a/raw_results/2023-08-24_10:09:12_70b49f023c9f6579c516671604468a491227b4da/pytorch_bert_inference/0/main.log b/raw_results/2023-08-24_10:09:12_70b49f023c9f6579c516671604468a491227b4da/pytorch_bert_inference/0/main.log deleted file mode 100644 index 635a949f9259d22d357137f371f21ad4b8308bc8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_10:09:12_70b49f023c9f6579c516671604468a491227b4da/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-24 10:53:25,888][benchmark][INFO] - Configuring inference benchmark -[2023-08-24 10:53:25,888][benchmark][INFO] - + Setting seed(42) -[2023-08-24 10:53:27,137][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-24 10:53:27,138][backend][INFO] - Configuring pytorch backend -[2023-08-24 10:53:27,138][backend][INFO] - + Checking initial device isolation -[2023-08-24 10:53:27,138][backend][INFO] - + Checking contineous device isolation -[2023-08-24 10:53:27,138][pytorch][INFO] - + Disabling gradients -[2023-08-24 10:53:27,138][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-24 10:53:27,750][pytorch][INFO] - + Turning on eval mode -[2023-08-24 10:53:27,750][inference][INFO] - Running inference benchmark -[2023-08-24 10:53:27,866][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-24 10:53:27,867][inference][INFO] - + Tracking forward 
pass peak memory -[2023-08-24 10:53:27,933][inference][INFO] - + Forward pass peak memory: 467.50515199999995 (MB) -[2023-08-24 10:53:27,934][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-24 10:53:27,936][inference][INFO] - + Warming up the forward pass -[2023-08-24 10:53:27,968][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-24 10:53:33,020][inference][INFO] - + Forward pass latency: 3.06e-03 (s) -[2023-08-24 10:53:33,021][inference][INFO] - + Forward pass throughput: 327.00 (samples/s) -[2023-08-24 10:53:33,022][inference][INFO] - Saving inference results -[2023-08-24 10:53:33,033][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-24_10:09:12_70b49f023c9f6579c516671604468a491227b4da/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-24_10:09:12_70b49f023c9f6579c516671604468a491227b4da/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_10:09:12_70b49f023c9f6579c516671604468a491227b4da/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_10:09:12_70b49f023c9f6579c516671604468a491227b4da/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-24_10:09:12_70b49f023c9f6579c516671604468a491227b4da/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index db390718a2e50b60dadc544dc5adfcc99fc32e30..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_10:09:12_70b49f023c9f6579c516671604468a491227b4da/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} 
- launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-24_10:09:12_70b49f023c9f6579c516671604468a491227b4da/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-24_10:09:12_70b49f023c9f6579c516671604468a491227b4da/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-24_10:09:12_70b49f023c9f6579c516671604468a491227b4da/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_10:09:12_70b49f023c9f6579c516671604468a491227b4da/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-24_10:09:12_70b49f023c9f6579c516671604468a491227b4da/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-24_10:09:12_70b49f023c9f6579c516671604468a491227b4da/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_10:09:12_70b49f023c9f6579c516671604468a491227b4da/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_10:09:12_70b49f023c9f6579c516671604468a491227b4da/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-24_10:09:12_70b49f023c9f6579c516671604468a491227b4da/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 6394f513680e39c4c15d9b643df1118f80fac788..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_10:09:12_70b49f023c9f6579c516671604468a491227b4da/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.59468799999996,0.00342,1170.0 diff --git a/raw_results/2023-08-24_10:09:12_70b49f023c9f6579c516671604468a491227b4da/pytorch_bert_inference/1/main.log b/raw_results/2023-08-24_10:09:12_70b49f023c9f6579c516671604468a491227b4da/pytorch_bert_inference/1/main.log deleted file mode 100644 index 
d86b12e9c14d54352a52dd9b850cf19c30325f41..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_10:09:12_70b49f023c9f6579c516671604468a491227b4da/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-24 10:53:33,404][benchmark][INFO] - Configuring inference benchmark -[2023-08-24 10:53:33,405][benchmark][INFO] - + Setting seed(42) -[2023-08-24 10:53:33,856][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-24 10:53:33,856][backend][INFO] - Configuring pytorch backend -[2023-08-24 10:53:33,856][backend][INFO] - + Checking initial device isolation -[2023-08-24 10:53:33,857][backend][INFO] - + Checking contineous device isolation -[2023-08-24 10:53:33,857][pytorch][INFO] - + Disabling gradients -[2023-08-24 10:53:33,857][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-24 10:53:33,978][pytorch][INFO] - + Turning on eval mode -[2023-08-24 10:53:33,978][inference][INFO] - Running inference benchmark -[2023-08-24 10:53:34,236][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-24 10:53:34,237][inference][INFO] - + Tracking forward pass peak memory -[2023-08-24 10:53:34,281][inference][INFO] - + Forward pass peak memory: 468.59468799999996 (MB) -[2023-08-24 10:53:34,282][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-24 10:53:34,283][inference][INFO] - + Warming up the forward pass -[2023-08-24 10:53:34,319][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-24 10:53:39,368][inference][INFO] - + Forward pass latency: 3.42e-03 (s) -[2023-08-24 10:53:39,369][inference][INFO] - + Forward pass throughput: 1170.00 (samples/s) -[2023-08-24 10:53:39,370][inference][INFO] - Saving inference results -[2023-08-24 10:53:39,378][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-24_10:09:12_70b49f023c9f6579c516671604468a491227b4da/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-24_10:09:12_70b49f023c9f6579c516671604468a491227b4da/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_10:09:12_70b49f023c9f6579c516671604468a491227b4da/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_10:09:12_70b49f023c9f6579c516671604468a491227b4da/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-24_10:09:12_70b49f023c9f6579c516671604468a491227b4da/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 84e3ce1131108be03b6e275a0cf763834be3ccc3..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_10:09:12_70b49f023c9f6579c516671604468a491227b4da/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
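
Note: the hydra.yaml files in this diff resolve their sweep directory from two environment variables, which is where the raw_results/<date>_<sha>/<experiment>/<job> layout seen throughout comes from. A rough illustration, assuming example values for the ${oc.env:...} interpolations (this is not benchmark code, just a reconstruction of the path template):

    import os

    # Mirrors hydra.sweep.dir = sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}
    # and subdir = ${hydra.job.num}
    commit_date = os.environ.get("COMMIT_DATE_GMT", "2023-08-24_10:09:12")  # assumed example value
    commit_sha = os.environ.get("COMMIT_SHA", "70b49f023c9f6579c516671604468a491227b4da")
    experiment_name = "pytorch_gpt2_inference"
    job_num = 0  # ${hydra.job.num}; the gpt2 config has no batch-size sweep (params: null), so only job 0
    print(f"sweeps/{commit_date}_{commit_sha}/{experiment_name}/{job_num}")
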
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-24_10:09:12_70b49f023c9f6579c516671604468a491227b4da/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-24_10:09:12_70b49f023c9f6579c516671604468a491227b4da/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-24_10:09:12_70b49f023c9f6579c516671604468a491227b4da/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_10:09:12_70b49f023c9f6579c516671604468a491227b4da/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-24_10:09:12_70b49f023c9f6579c516671604468a491227b4da/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-24_10:09:12_70b49f023c9f6579c516671604468a491227b4da/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_10:09:12_70b49f023c9f6579c516671604468a491227b4da/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_10:09:12_70b49f023c9f6579c516671604468a491227b4da/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-24_10:09:12_70b49f023c9f6579c516671604468a491227b4da/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 8b5a86413c0a023e148115defc0f42f6be155435..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_10:09:12_70b49f023c9f6579c516671604468a491227b4da/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,468.963328,0.00376,266.0,0.481,208.0 diff --git a/raw_results/2023-08-24_10:09:12_70b49f023c9f6579c516671604468a491227b4da/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-24_10:09:12_70b49f023c9f6579c516671604468a491227b4da/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index b4b0c8f3631b9190a9ba13895ed1cbf3f5f03973..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_10:09:12_70b49f023c9f6579c516671604468a491227b4da/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-24 10:53:44,117][benchmark][INFO] - Configuring inference benchmark -[2023-08-24 10:53:44,118][benchmark][INFO] - + Setting seed(42) -[2023-08-24 10:53:45,734][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-24 10:53:45,735][backend][INFO] - Configuring pytorch backend -[2023-08-24 10:53:45,735][backend][INFO] - + Checking initial device isolation -[2023-08-24 10:53:45,735][backend][INFO] - + Checking contineous device isolation -[2023-08-24 10:53:45,735][pytorch][INFO] - + Disabling gradients -[2023-08-24 10:53:45,736][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-24 10:53:46,372][pytorch][INFO] - + Turning on eval mode -[2023-08-24 10:53:46,373][inference][INFO] - Running inference benchmark -[2023-08-24 10:53:46,565][inference][INFO] - + Tracking forward pass peak memory -[2023-08-24 10:53:46,611][inference][INFO] - + Forward pass peak memory: 468.963328 (MB) -[2023-08-24 10:53:46,612][inference][INFO] - + Warming up the forward pass 
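
Note: this gpt2 run records generate.latency(s) = 0.481 and generate.throughput(tokens/s) = 208.0 with new_tokens: 100 and batch_size: 1, so the throughput column is consistent with tokens generated per second of the generation pass. A minimal arithmetic check (not part of the benchmark code):

    # Hypothetical check: generate throughput ~= (batch_size * new_tokens) / generate latency
    batch_size, new_tokens = 1, 100   # from the hydra config above
    generate_latency_s = 0.481        # generate.latency(s) from inference_results.csv
    tokens_per_s = batch_size * new_tokens / generate_latency_s
    print(round(tokens_per_s))        # ~208, matching generate.throughput(tokens/s)
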
-[2023-08-24 10:53:46,643][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-24 10:53:51,691][inference][INFO] - + Forward pass latency: 3.76e-03 (s) -[2023-08-24 10:53:51,694][inference][INFO] - + Forward pass throughput: 266.00 (samples/s) -[2023-08-24 10:53:51,695][inference][INFO] - + Warming up the generation pass -[2023-08-24 10:53:52,187][inference][INFO] - + Tracking generation latency and throughput -[2023-08-24 10:53:57,478][inference][INFO] - + Generation pass latency: 4.81e-01 (s) -[2023-08-24 10:53:57,480][inference][INFO] - + Generation pass throughput: 208.00 (tokens/s) -[2023-08-24 10:53:57,481][inference][INFO] - Saving inference results -[2023-08-24 10:53:57,493][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-24_12:56:11_2febd506149d039b51590f5dc7b45f0d8624819d/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-24_12:56:11_2febd506149d039b51590f5dc7b45f0d8624819d/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_12:56:11_2febd506149d039b51590f5dc7b45f0d8624819d/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_12:56:11_2febd506149d039b51590f5dc7b45f0d8624819d/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-24_12:56:11_2febd506149d039b51590f5dc7b45f0d8624819d/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index a47d0a4811eb1e7e2ffba8754ec39e95c2e2855c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_12:56:11_2febd506149d039b51590f5dc7b45f0d8624819d/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-24_12:56:11_2febd506149d039b51590f5dc7b45f0d8624819d/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-24_12:56:11_2febd506149d039b51590f5dc7b45f0d8624819d/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-24_12:56:11_2febd506149d039b51590f5dc7b45f0d8624819d/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_12:56:11_2febd506149d039b51590f5dc7b45f0d8624819d/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-24_12:56:11_2febd506149d039b51590f5dc7b45f0d8624819d/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-24_12:56:11_2febd506149d039b51590f5dc7b45f0d8624819d/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_12:56:11_2febd506149d039b51590f5dc7b45f0d8624819d/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_12:56:11_2febd506149d039b51590f5dc7b45f0d8624819d/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-24_12:56:11_2febd506149d039b51590f5dc7b45f0d8624819d/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 2d675e2425b5d44c7985a4037d53d1386321b568..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_12:56:11_2febd506149d039b51590f5dc7b45f0d8624819d/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.7632,0.00379,264.0 diff --git a/raw_results/2023-08-24_12:56:11_2febd506149d039b51590f5dc7b45f0d8624819d/pytorch_bert_inference/0/main.log b/raw_results/2023-08-24_12:56:11_2febd506149d039b51590f5dc7b45f0d8624819d/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
5661c9e4b2f6c1b2f9841fc87d1120fd98fec457..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_12:56:11_2febd506149d039b51590f5dc7b45f0d8624819d/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-24 12:58:34,222][benchmark][INFO] - Configuring inference benchmark -[2023-08-24 12:58:34,223][benchmark][INFO] - + Setting seed(42) -[2023-08-24 12:58:35,453][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-24 12:58:35,454][backend][INFO] - Configuring pytorch backend -[2023-08-24 12:58:35,454][backend][INFO] - + Checking initial device isolation -[2023-08-24 12:58:35,454][backend][INFO] - + Checking contineous device isolation -[2023-08-24 12:58:35,454][pytorch][INFO] - + Disabling gradients -[2023-08-24 12:58:35,454][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-24 12:58:36,081][pytorch][INFO] - + Turning on eval mode -[2023-08-24 12:58:36,081][inference][INFO] - Running inference benchmark -[2023-08-24 12:58:36,203][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-24 12:58:36,205][inference][INFO] - + Tracking forward pass peak memory -[2023-08-24 12:58:36,261][inference][INFO] - + Forward pass peak memory: 467.7632 (MB) -[2023-08-24 12:58:36,262][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-24 12:58:36,264][inference][INFO] - + Warming up the forward pass -[2023-08-24 12:58:36,299][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-24 12:58:41,344][inference][INFO] - + Forward pass latency: 3.79e-03 (s) -[2023-08-24 12:58:41,346][inference][INFO] - + Forward pass throughput: 264.00 (samples/s) -[2023-08-24 12:58:41,346][inference][INFO] - Saving inference results -[2023-08-24 12:58:41,358][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-24_12:56:11_2febd506149d039b51590f5dc7b45f0d8624819d/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-24_12:56:11_2febd506149d039b51590f5dc7b45f0d8624819d/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_12:56:11_2febd506149d039b51590f5dc7b45f0d8624819d/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_12:56:11_2febd506149d039b51590f5dc7b45f0d8624819d/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-24_12:56:11_2febd506149d039b51590f5dc7b45f0d8624819d/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index cc1c06357d0fa1f31c1d6aaae7e593417b9ccd1d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_12:56:11_2febd506149d039b51590f5dc7b45f0d8624819d/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
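
Note: this second bert job runs the same model at batch_size: 4, the other leg of the benchmark.input_shapes.batch_size: 1,4 sweep, and its results row below shows how reported throughput scales with the batch: 4 / 0.00434 ≈ 922 samples/s. A small sketch parsing such a row, assuming pandas is available and using the column names these CSVs actually carry:

    import io
    import pandas as pd

    # Inline copy of the batch_size=4 inference_results.csv row recorded below
    csv = ",forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s)\n" \
          "0,468.81587199999996,0.00434,922.0\n"
    df = pd.read_csv(io.StringIO(csv), index_col=0)
    batch_size = 4
    print(batch_size / df["forward.latency(s)"][0])  # ~921.7, reported as 922.0
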
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-24_12:56:11_2febd506149d039b51590f5dc7b45f0d8624819d/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-24_12:56:11_2febd506149d039b51590f5dc7b45f0d8624819d/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-24_12:56:11_2febd506149d039b51590f5dc7b45f0d8624819d/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_12:56:11_2febd506149d039b51590f5dc7b45f0d8624819d/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-24_12:56:11_2febd506149d039b51590f5dc7b45f0d8624819d/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-24_12:56:11_2febd506149d039b51590f5dc7b45f0d8624819d/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_12:56:11_2febd506149d039b51590f5dc7b45f0d8624819d/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_12:56:11_2febd506149d039b51590f5dc7b45f0d8624819d/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-24_12:56:11_2febd506149d039b51590f5dc7b45f0d8624819d/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index d9b24e55fca2212777fcf08d0e879d99b0e38025..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_12:56:11_2febd506149d039b51590f5dc7b45f0d8624819d/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.81587199999996,0.00434,922.0 diff --git a/raw_results/2023-08-24_12:56:11_2febd506149d039b51590f5dc7b45f0d8624819d/pytorch_bert_inference/1/main.log b/raw_results/2023-08-24_12:56:11_2febd506149d039b51590f5dc7b45f0d8624819d/pytorch_bert_inference/1/main.log deleted file mode 100644 index 75409fc817e11a320f32712dff8d856168bc3c2a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_12:56:11_2febd506149d039b51590f5dc7b45f0d8624819d/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-24 12:58:41,751][benchmark][INFO] - Configuring inference benchmark -[2023-08-24 12:58:41,752][benchmark][INFO] - + Setting seed(42) -[2023-08-24 12:58:42,290][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-24 12:58:42,291][backend][INFO] - Configuring pytorch backend -[2023-08-24 12:58:42,291][backend][INFO] - + Checking initial device isolation -[2023-08-24 12:58:42,291][backend][INFO] - + Checking contineous device isolation -[2023-08-24 12:58:42,291][pytorch][INFO] - + Disabling gradients -[2023-08-24 12:58:42,291][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-24 12:58:42,551][pytorch][INFO] - + Turning on eval mode -[2023-08-24 12:58:42,552][inference][INFO] - Running inference benchmark -[2023-08-24 12:58:42,683][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-24 12:58:42,685][inference][INFO] - + Tracking forward 
pass peak memory -[2023-08-24 12:58:42,729][inference][INFO] - + Forward pass peak memory: 468.81587199999996 (MB) -[2023-08-24 12:58:42,730][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-24 12:58:42,732][inference][INFO] - + Warming up the forward pass -[2023-08-24 12:58:42,776][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-24 12:58:47,814][inference][INFO] - + Forward pass latency: 4.34e-03 (s) -[2023-08-24 12:58:47,815][inference][INFO] - + Forward pass throughput: 922.00 (samples/s) -[2023-08-24 12:58:47,816][inference][INFO] - Saving inference results -[2023-08-24 12:58:47,824][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-24_12:56:11_2febd506149d039b51590f5dc7b45f0d8624819d/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-24_12:56:11_2febd506149d039b51590f5dc7b45f0d8624819d/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_12:56:11_2febd506149d039b51590f5dc7b45f0d8624819d/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_12:56:11_2febd506149d039b51590f5dc7b45f0d8624819d/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-24_12:56:11_2febd506149d039b51590f5dc7b45f0d8624819d/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 1a312910ac5a6b5b584be1137bd0c360386ef076..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_12:56:11_2febd506149d039b51590f5dc7b45f0d8624819d/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - 
launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-24_12:56:11_2febd506149d039b51590f5dc7b45f0d8624819d/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-24_12:56:11_2febd506149d039b51590f5dc7b45f0d8624819d/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-24_12:56:11_2febd506149d039b51590f5dc7b45f0d8624819d/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_12:56:11_2febd506149d039b51590f5dc7b45f0d8624819d/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-24_12:56:11_2febd506149d039b51590f5dc7b45f0d8624819d/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-24_12:56:11_2febd506149d039b51590f5dc7b45f0d8624819d/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_12:56:11_2febd506149d039b51590f5dc7b45f0d8624819d/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_12:56:11_2febd506149d039b51590f5dc7b45f0d8624819d/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-24_12:56:11_2febd506149d039b51590f5dc7b45f0d8624819d/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 242d78a8d90fd12def5520f2444de07e7440bfb3..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_12:56:11_2febd506149d039b51590f5dc7b45f0d8624819d/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,468.979712,0.00303,330.0,0.485,206.0 diff --git a/raw_results/2023-08-24_12:56:11_2febd506149d039b51590f5dc7b45f0d8624819d/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-24_12:56:11_2febd506149d039b51590f5dc7b45f0d8624819d/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 97879e22f1fdc60059ad9c46697377c075e733e4..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-24_12:56:11_2febd506149d039b51590f5dc7b45f0d8624819d/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-24 12:58:52,708][benchmark][INFO] - Configuring inference benchmark -[2023-08-24 12:58:52,709][benchmark][INFO] - + Setting seed(42) -[2023-08-24 12:58:54,209][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-24 12:58:54,209][backend][INFO] - Configuring pytorch backend -[2023-08-24 12:58:54,209][backend][INFO] - + Checking initial device isolation -[2023-08-24 12:58:54,210][backend][INFO] - + Checking contineous device isolation -[2023-08-24 12:58:54,210][pytorch][INFO] - + Disabling gradients -[2023-08-24 12:58:54,210][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-24 12:58:54,871][pytorch][INFO] - + Turning on eval mode -[2023-08-24 12:58:54,872][inference][INFO] - Running inference benchmark -[2023-08-24 12:58:55,064][inference][INFO] - + Tracking forward pass peak memory -[2023-08-24 12:58:55,106][inference][INFO] - + Forward pass peak memory: 468.979712 (MB) -[2023-08-24 12:58:55,108][inference][INFO] - + Warming up the forward pass -[2023-08-24 12:58:55,139][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-24 12:59:00,193][inference][INFO] - + Forward pass latency: 3.03e-03 (s) -[2023-08-24 12:59:00,195][inference][INFO] - + Forward pass throughput: 330.00 (samples/s) -[2023-08-24 12:59:00,196][inference][INFO] - + Warming up the generation pass -[2023-08-24 12:59:00,689][inference][INFO] - + Tracking generation latency and throughput -[2023-08-24 12:59:06,025][inference][INFO] - + Generation pass latency: 4.85e-01 (s) -[2023-08-24 12:59:06,027][inference][INFO] - + Generation pass throughput: 206.00 (tokens/s) -[2023-08-24 12:59:06,027][inference][INFO] - Saving inference results -[2023-08-24 12:59:06,040][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-24_12:57:16_584eeb5387193d352da976cc3d1305f5c3404850/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-24_12:57:16_584eeb5387193d352da976cc3d1305f5c3404850/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_12:57:16_584eeb5387193d352da976cc3d1305f5c3404850/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_12:57:16_584eeb5387193d352da976cc3d1305f5c3404850/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-24_12:57:16_584eeb5387193d352da976cc3d1305f5c3404850/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 9d7eb88dad64446632b0fd9516b50fe355402603..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_12:57:16_584eeb5387193d352da976cc3d1305f5c3404850/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
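The sweeper block above (`benchmark.input_shapes.batch_size: 1,4`) is what produces the numbered job subdirectories (`0`, `1`) that recur throughout these paths: Hydra's basic sweeper launches one job per listed value and writes each under `sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}/${hydra.job.num}`. A minimal sketch of collecting the per-job CSVs from the `raw_results` layout archived in this diff — pandas and the exact directory depth are assumptions drawn from the paths above, not anything these configs pin down:

```python
# Sketch: gather per-job inference_results.csv files from the
# raw_results/<commit>/<experiment>/<job_num>/ layout seen in this diff.
# pandas is an assumed dependency; the path structure is copied from above.
from pathlib import Path

import pandas as pd

def collect_results(root: str = "raw_results") -> pd.DataFrame:
    frames = []
    for csv_path in sorted(Path(root).glob("*/*/*/inference_results.csv")):
        df = pd.read_csv(csv_path, index_col=0)
        df["commit"] = csv_path.parts[-4]      # e.g. 2023-08-24_12:57:16_<sha>
        df["experiment"] = csv_path.parts[-3]  # e.g. pytorch_bert_inference
        df["job"] = int(csv_path.parts[-2])    # sweeper job num (0 -> bs=1, 1 -> bs=4)
        frames.append(df)
    return pd.concat(frames, ignore_index=True)
```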
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-24_12:57:16_584eeb5387193d352da976cc3d1305f5c3404850/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-24_12:57:16_584eeb5387193d352da976cc3d1305f5c3404850/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-24_12:57:16_584eeb5387193d352da976cc3d1305f5c3404850/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_12:57:16_584eeb5387193d352da976cc3d1305f5c3404850/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-24_12:57:16_584eeb5387193d352da976cc3d1305f5c3404850/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-24_12:57:16_584eeb5387193d352da976cc3d1305f5c3404850/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_12:57:16_584eeb5387193d352da976cc3d1305f5c3404850/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_12:57:16_584eeb5387193d352da976cc3d1305f5c3404850/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-24_12:57:16_584eeb5387193d352da976cc3d1305f5c3404850/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 3202e715174522c708e970e9239054e9ba294360..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_12:57:16_584eeb5387193d352da976cc3d1305f5c3404850/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.28806399999996,0.00315,317.0 diff --git a/raw_results/2023-08-24_12:57:16_584eeb5387193d352da976cc3d1305f5c3404850/pytorch_bert_inference/0/main.log b/raw_results/2023-08-24_12:57:16_584eeb5387193d352da976cc3d1305f5c3404850/pytorch_bert_inference/0/main.log deleted file mode 100644 index 417d5c9719b0d52af3733d3f5a4ca9944bd4eba4..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_12:57:16_584eeb5387193d352da976cc3d1305f5c3404850/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-24 13:00:13,552][benchmark][INFO] - Configuring inference benchmark -[2023-08-24 13:00:13,553][benchmark][INFO] - + Setting seed(42) -[2023-08-24 13:00:14,820][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-24 13:00:14,820][backend][INFO] - Configuring pytorch backend -[2023-08-24 13:00:14,820][backend][INFO] - + Checking initial device isolation -[2023-08-24 13:00:14,820][backend][INFO] - + Checking contineous device isolation -[2023-08-24 13:00:14,821][pytorch][INFO] - + Disabling gradients -[2023-08-24 13:00:14,821][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-24 13:00:15,445][pytorch][INFO] - + Turning on eval mode -[2023-08-24 13:00:15,446][inference][INFO] - Running inference benchmark -[2023-08-24 13:00:15,564][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-24 13:00:15,566][inference][INFO] - + Tracking forward 
pass peak memory -[2023-08-24 13:00:15,624][inference][INFO] - + Forward pass peak memory: 467.28806399999996 (MB) -[2023-08-24 13:00:15,626][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-24 13:00:15,627][inference][INFO] - + Warming up the forward pass -[2023-08-24 13:00:15,659][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-24 13:00:20,709][inference][INFO] - + Forward pass latency: 3.15e-03 (s) -[2023-08-24 13:00:20,711][inference][INFO] - + Forward pass throughput: 317.00 (samples/s) -[2023-08-24 13:00:20,711][inference][INFO] - Saving inference results -[2023-08-24 13:00:20,722][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-24_12:57:16_584eeb5387193d352da976cc3d1305f5c3404850/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-24_12:57:16_584eeb5387193d352da976cc3d1305f5c3404850/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_12:57:16_584eeb5387193d352da976cc3d1305f5c3404850/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_12:57:16_584eeb5387193d352da976cc3d1305f5c3404850/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-24_12:57:16_584eeb5387193d352da976cc3d1305f5c3404850/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 3b6ce87d299c331e8401b8d26c3284085cc9fb52..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_12:57:16_584eeb5387193d352da976cc3d1305f5c3404850/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} 
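The figures in these logs and CSVs are internally consistent: forward throughput equals batch size divided by mean forward latency, and generation throughput equals `new_tokens` divided by generation latency. This relationship is inferred from the recorded numbers, not from the benchmark's source, but it checks out against the values above:

```python
# Sanity check of the CSV columns:
#   forward.throughput  ~= batch_size / forward.latency
#   generate.throughput ~= new_tokens / generate.latency
# Inferred from the recorded numbers, not from the benchmark's code.
def forward_throughput(batch_size: int, latency_s: float) -> float:
    return batch_size / latency_s

def generate_throughput(new_tokens: int, latency_s: float) -> float:
    return new_tokens / latency_s

# bert, batch_size=1: 1 / 0.00315 ~= 317 samples/s (matches the log above)
print(round(forward_throughput(1, 0.00315)))
# gpt2, new_tokens=100: 100 / 0.485 ~= 206 tokens/s (matches the gpt2 results)
print(round(generate_throughput(100, 0.485)))
```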
- launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-24_12:57:16_584eeb5387193d352da976cc3d1305f5c3404850/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-24_12:57:16_584eeb5387193d352da976cc3d1305f5c3404850/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-24_12:57:16_584eeb5387193d352da976cc3d1305f5c3404850/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_12:57:16_584eeb5387193d352da976cc3d1305f5c3404850/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-24_12:57:16_584eeb5387193d352da976cc3d1305f5c3404850/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-24_12:57:16_584eeb5387193d352da976cc3d1305f5c3404850/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_12:57:16_584eeb5387193d352da976cc3d1305f5c3404850/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_12:57:16_584eeb5387193d352da976cc3d1305f5c3404850/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-24_12:57:16_584eeb5387193d352da976cc3d1305f5c3404850/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index fd550aca6dc84a1c24f1d1d08d27b16d41e2bf7d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_12:57:16_584eeb5387193d352da976cc3d1305f5c3404850/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.402176,0.0036,1110.0 diff --git a/raw_results/2023-08-24_12:57:16_584eeb5387193d352da976cc3d1305f5c3404850/pytorch_bert_inference/1/main.log b/raw_results/2023-08-24_12:57:16_584eeb5387193d352da976cc3d1305f5c3404850/pytorch_bert_inference/1/main.log deleted file mode 100644 index 
d42d746038b83182cde4221837a9680248852a34..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_12:57:16_584eeb5387193d352da976cc3d1305f5c3404850/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-24 13:00:21,096][benchmark][INFO] - Configuring inference benchmark -[2023-08-24 13:00:21,098][benchmark][INFO] - + Setting seed(42) -[2023-08-24 13:00:21,569][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-24 13:00:21,569][backend][INFO] - Configuring pytorch backend -[2023-08-24 13:00:21,569][backend][INFO] - + Checking initial device isolation -[2023-08-24 13:00:21,569][backend][INFO] - + Checking contineous device isolation -[2023-08-24 13:00:21,570][pytorch][INFO] - + Disabling gradients -[2023-08-24 13:00:21,570][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-24 13:00:21,689][pytorch][INFO] - + Turning on eval mode -[2023-08-24 13:00:21,689][inference][INFO] - Running inference benchmark -[2023-08-24 13:00:21,824][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-24 13:00:21,826][inference][INFO] - + Tracking forward pass peak memory -[2023-08-24 13:00:21,870][inference][INFO] - + Forward pass peak memory: 468.402176 (MB) -[2023-08-24 13:00:21,871][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-24 13:00:21,873][inference][INFO] - + Warming up the forward pass -[2023-08-24 13:00:21,924][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-24 13:00:26,967][inference][INFO] - + Forward pass latency: 3.60e-03 (s) -[2023-08-24 13:00:26,968][inference][INFO] - + Forward pass throughput: 1110.00 (samples/s) -[2023-08-24 13:00:26,968][inference][INFO] - Saving inference results -[2023-08-24 13:00:26,976][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-24_12:57:16_584eeb5387193d352da976cc3d1305f5c3404850/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-24_12:57:16_584eeb5387193d352da976cc3d1305f5c3404850/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_12:57:16_584eeb5387193d352da976cc3d1305f5c3404850/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_12:57:16_584eeb5387193d352da976cc3d1305f5c3404850/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-24_12:57:16_584eeb5387193d352da976cc3d1305f5c3404850/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 0e153e9221b656d41ab8be6c8b8b4190fe77b6d3..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_12:57:16_584eeb5387193d352da976cc3d1305f5c3404850/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
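The `warmup_runs: 10` / `benchmark_duration: 5` pair in these configs matches what the logs show: a batch of untimed warmup passes, then roughly five seconds of timed passes (note the ~5 s gap between "Tracking forward pass latency" and the reported result). A simplified sketch of such a duration-bounded measurement loop — illustrative only, under the assumption that the config keys mean what their names suggest, and not the actual `InferenceBenchmark` implementation:

```python
# Minimal sketch of a duration-bounded latency measurement consistent with
# `warmup_runs: 10` and `benchmark_duration: 5` (seconds) above. This is a
# simplification for illustration, not the actual InferenceBenchmark code.
import time
from statistics import mean

def measure_latency(fn, warmup_runs: int = 10, benchmark_duration: float = 5.0) -> float:
    for _ in range(warmup_runs):      # untimed warmup passes
        fn()
    latencies = []
    end = time.perf_counter() + benchmark_duration
    while time.perf_counter() < end:  # keep timing until the budget is spent
        start = time.perf_counter()
        fn()
        latencies.append(time.perf_counter() - start)
    return mean(latencies)

# e.g. measure_latency(lambda: model(**dummy_inputs)) on an eval-mode model
```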
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-24_12:57:16_584eeb5387193d352da976cc3d1305f5c3404850/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-24_12:57:16_584eeb5387193d352da976cc3d1305f5c3404850/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-24_12:57:16_584eeb5387193d352da976cc3d1305f5c3404850/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_12:57:16_584eeb5387193d352da976cc3d1305f5c3404850/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-24_12:57:16_584eeb5387193d352da976cc3d1305f5c3404850/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-24_12:57:16_584eeb5387193d352da976cc3d1305f5c3404850/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_12:57:16_584eeb5387193d352da976cc3d1305f5c3404850/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_12:57:16_584eeb5387193d352da976cc3d1305f5c3404850/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-24_12:57:16_584eeb5387193d352da976cc3d1305f5c3404850/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 212b638109359abf6d3832c305b75d63a3ef7db1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_12:57:16_584eeb5387193d352da976cc3d1305f5c3404850/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.184512,0.00381,262.0,0.563,178.0 diff --git a/raw_results/2023-08-24_12:57:16_584eeb5387193d352da976cc3d1305f5c3404850/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-24_12:57:16_584eeb5387193d352da976cc3d1305f5c3404850/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 853ede67581c42204e530f135b843fe3c5f6c75f..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_12:57:16_584eeb5387193d352da976cc3d1305f5c3404850/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-24 13:00:31,777][benchmark][INFO] - Configuring inference benchmark -[2023-08-24 13:00:31,778][benchmark][INFO] - + Setting seed(42) -[2023-08-24 13:00:33,211][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-24 13:00:33,211][backend][INFO] - Configuring pytorch backend -[2023-08-24 13:00:33,211][backend][INFO] - + Checking initial device isolation -[2023-08-24 13:00:33,212][backend][INFO] - + Checking contineous device isolation -[2023-08-24 13:00:33,212][pytorch][INFO] - + Disabling gradients -[2023-08-24 13:00:33,212][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-24 13:00:33,874][pytorch][INFO] - + Turning on eval mode -[2023-08-24 13:00:33,874][inference][INFO] - Running inference benchmark -[2023-08-24 13:00:34,077][inference][INFO] - + Tracking forward pass peak memory -[2023-08-24 13:00:34,123][inference][INFO] - + Forward pass peak memory: 469.184512 (MB) -[2023-08-24 13:00:34,125][inference][INFO] - + Warming up the forward pass 
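The peak-memory figures in these logs (e.g. 469.184512 MB just above) are process-level, decimal megabytes on CPU. The diff does not show how the tracker is implemented, so the following is only one plausible stand-in, assuming `psutil` for the RSS sampling:

```python
# One way to sample a process's peak resident memory around a forward pass,
# assuming psutil. The diff does not show how optimum-benchmark measures
# "peak memory (MB)", so treat this purely as an illustrative stand-in.
import threading

import psutil

def peak_memory_mb(fn, interval: float = 0.001) -> float:
    process = psutil.Process()
    peak = process.memory_info().rss
    done = threading.Event()

    def sampler():
        nonlocal peak
        while not done.is_set():
            peak = max(peak, process.memory_info().rss)
            done.wait(interval)

    thread = threading.Thread(target=sampler)
    thread.start()
    try:
        fn()
    finally:
        done.set()
        thread.join()
    return peak / 1e6  # decimal MB, matching the unit used in these logs
```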
-[2023-08-24 13:00:34,156][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-24 13:00:39,200][inference][INFO] - + Forward pass latency: 3.81e-03 (s) -[2023-08-24 13:00:39,202][inference][INFO] - + Forward pass throughput: 262.00 (samples/s) -[2023-08-24 13:00:39,203][inference][INFO] - + Warming up the generation pass -[2023-08-24 13:00:39,791][inference][INFO] - + Tracking generation latency and throughput -[2023-08-24 13:00:44,863][inference][INFO] - + Generation pass latency: 5.63e-01 (s) -[2023-08-24 13:00:44,864][inference][INFO] - + Generation pass throughput: 178.00 (tokens/s) -[2023-08-24 13:00:44,864][inference][INFO] - Saving inference results -[2023-08-24 13:00:44,876][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-24_13:15:43_0a365c3e6a0e174302debff4023182838607acf1/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-24_13:15:43_0a365c3e6a0e174302debff4023182838607acf1/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_13:15:43_0a365c3e6a0e174302debff4023182838607acf1/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_13:15:43_0a365c3e6a0e174302debff4023182838607acf1/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-24_13:15:43_0a365c3e6a0e174302debff4023182838607acf1/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 82a0a240dd83710dca5aa961548f5e1e17205222..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_13:15:43_0a365c3e6a0e174302debff4023182838607acf1/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-24_13:15:43_0a365c3e6a0e174302debff4023182838607acf1/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-24_13:15:43_0a365c3e6a0e174302debff4023182838607acf1/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-24_13:15:43_0a365c3e6a0e174302debff4023182838607acf1/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_13:15:43_0a365c3e6a0e174302debff4023182838607acf1/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-24_13:15:43_0a365c3e6a0e174302debff4023182838607acf1/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-24_13:15:43_0a365c3e6a0e174302debff4023182838607acf1/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_13:15:43_0a365c3e6a0e174302debff4023182838607acf1/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_13:15:43_0a365c3e6a0e174302debff4023182838607acf1/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-24_13:15:43_0a365c3e6a0e174302debff4023182838607acf1/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 172e815f7c651db3d2477d1f8161b85044d95afe..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_13:15:43_0a365c3e6a0e174302debff4023182838607acf1/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.86208,0.00319,313.0 diff --git a/raw_results/2023-08-24_13:15:43_0a365c3e6a0e174302debff4023182838607acf1/pytorch_bert_inference/0/main.log b/raw_results/2023-08-24_13:15:43_0a365c3e6a0e174302debff4023182838607acf1/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
946cd0cd985c322f42ba0573c7ce89722169615b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_13:15:43_0a365c3e6a0e174302debff4023182838607acf1/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-24 14:49:46,180][benchmark][INFO] - Configuring inference benchmark -[2023-08-24 14:49:46,180][benchmark][INFO] - + Setting seed(42) -[2023-08-24 14:49:47,421][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-24 14:49:47,421][backend][INFO] - Configuring pytorch backend -[2023-08-24 14:49:47,422][backend][INFO] - + Checking initial device isolation -[2023-08-24 14:49:47,422][backend][INFO] - + Checking contineous device isolation -[2023-08-24 14:49:47,422][pytorch][INFO] - + Disabling gradients -[2023-08-24 14:49:47,422][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-24 14:49:48,042][pytorch][INFO] - + Turning on eval mode -[2023-08-24 14:49:48,042][inference][INFO] - Running inference benchmark -[2023-08-24 14:49:48,163][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-24 14:49:48,164][inference][INFO] - + Tracking forward pass peak memory -[2023-08-24 14:49:48,226][inference][INFO] - + Forward pass peak memory: 466.86208 (MB) -[2023-08-24 14:49:48,227][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-24 14:49:48,229][inference][INFO] - + Warming up the forward pass -[2023-08-24 14:49:48,267][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-24 14:49:53,316][inference][INFO] - + Forward pass latency: 3.19e-03 (s) -[2023-08-24 14:49:53,317][inference][INFO] - + Forward pass throughput: 313.00 (samples/s) -[2023-08-24 14:49:53,317][inference][INFO] - Saving inference results -[2023-08-24 14:49:53,329][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-24_13:15:43_0a365c3e6a0e174302debff4023182838607acf1/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-24_13:15:43_0a365c3e6a0e174302debff4023182838607acf1/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_13:15:43_0a365c3e6a0e174302debff4023182838607acf1/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_13:15:43_0a365c3e6a0e174302debff4023182838607acf1/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-24_13:15:43_0a365c3e6a0e174302debff4023182838607acf1/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index e6414eb48360e4cf6d2b9cfbaaa6e57b70872d7f..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_13:15:43_0a365c3e6a0e174302debff4023182838607acf1/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
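The `disable_grad: ${is_inference:${benchmark.name}}` entries in the config.yaml listings above show up as the resolved literals `disable_grad: true` / `eval_mode: true` in the matching hydra_config.yaml files: OmegaConf resolves the interpolation when the composed config is written out. A minimal sketch of how such a resolver behaves — the actual `is_inference` resolver is registered inside optimum-benchmark, so this re-creation is an assumption:

```python
# Minimal sketch of an `is_inference`-style OmegaConf resolver; the real
# registration lives in optimum-benchmark. This stand-in only illustrates
# why config.yaml stores an interpolation while hydra_config.yaml stores `true`.
from omegaconf import OmegaConf

OmegaConf.register_new_resolver("is_inference", lambda name: name == "inference")

cfg = OmegaConf.create(
    {
        "benchmark": {"name": "inference"},
        "backend": {"disable_grad": "${is_inference:${benchmark.name}}"},
    }
)
print(cfg.backend.disable_grad)  # True -> serialized as `disable_grad: true`
```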
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-24_13:15:43_0a365c3e6a0e174302debff4023182838607acf1/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-24_13:15:43_0a365c3e6a0e174302debff4023182838607acf1/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-24_13:15:43_0a365c3e6a0e174302debff4023182838607acf1/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_13:15:43_0a365c3e6a0e174302debff4023182838607acf1/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-24_13:15:43_0a365c3e6a0e174302debff4023182838607acf1/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-24_13:15:43_0a365c3e6a0e174302debff4023182838607acf1/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_13:15:43_0a365c3e6a0e174302debff4023182838607acf1/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_13:15:43_0a365c3e6a0e174302debff4023182838607acf1/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-24_13:15:43_0a365c3e6a0e174302debff4023182838607acf1/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index ee2fff299ea8312bd94ee4b14237a5c3161be0a6..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_13:15:43_0a365c3e6a0e174302debff4023182838607acf1/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.96390399999996,0.00345,1160.0 diff --git a/raw_results/2023-08-24_13:15:43_0a365c3e6a0e174302debff4023182838607acf1/pytorch_bert_inference/1/main.log b/raw_results/2023-08-24_13:15:43_0a365c3e6a0e174302debff4023182838607acf1/pytorch_bert_inference/1/main.log deleted file mode 100644 index c24821b8b731ceeab06cdf8540822f2d20c49011..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_13:15:43_0a365c3e6a0e174302debff4023182838607acf1/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-24 14:49:53,703][benchmark][INFO] - Configuring inference benchmark -[2023-08-24 14:49:53,704][benchmark][INFO] - + Setting seed(42) -[2023-08-24 14:49:54,261][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-24 14:49:54,261][backend][INFO] - Configuring pytorch backend -[2023-08-24 14:49:54,261][backend][INFO] - + Checking initial device isolation -[2023-08-24 14:49:54,262][backend][INFO] - + Checking contineous device isolation -[2023-08-24 14:49:54,262][pytorch][INFO] - + Disabling gradients -[2023-08-24 14:49:54,262][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-24 14:49:54,389][pytorch][INFO] - + Turning on eval mode -[2023-08-24 14:49:54,389][inference][INFO] - Running inference benchmark -[2023-08-24 14:49:54,535][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-24 14:49:54,536][inference][INFO] - + Tracking forward 
pass peak memory -[2023-08-24 14:49:54,579][inference][INFO] - + Forward pass peak memory: 467.96390399999996 (MB) -[2023-08-24 14:49:54,580][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-24 14:49:54,582][inference][INFO] - + Warming up the forward pass -[2023-08-24 14:49:54,617][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-24 14:49:59,661][inference][INFO] - + Forward pass latency: 3.45e-03 (s) -[2023-08-24 14:49:59,662][inference][INFO] - + Forward pass throughput: 1160.00 (samples/s) -[2023-08-24 14:49:59,662][inference][INFO] - Saving inference results -[2023-08-24 14:49:59,670][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-24_13:15:43_0a365c3e6a0e174302debff4023182838607acf1/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-24_13:15:43_0a365c3e6a0e174302debff4023182838607acf1/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_13:15:43_0a365c3e6a0e174302debff4023182838607acf1/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_13:15:43_0a365c3e6a0e174302debff4023182838607acf1/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-24_13:15:43_0a365c3e6a0e174302debff4023182838607acf1/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 611984c7b2ec6b1c03ca64ed168ad420010b4223..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_13:15:43_0a365c3e6a0e174302debff4023182838607acf1/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - 
launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-24_13:15:43_0a365c3e6a0e174302debff4023182838607acf1/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-24_13:15:43_0a365c3e6a0e174302debff4023182838607acf1/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-24_13:15:43_0a365c3e6a0e174302debff4023182838607acf1/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_13:15:43_0a365c3e6a0e174302debff4023182838607acf1/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-24_13:15:43_0a365c3e6a0e174302debff4023182838607acf1/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-24_13:15:43_0a365c3e6a0e174302debff4023182838607acf1/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_13:15:43_0a365c3e6a0e174302debff4023182838607acf1/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_13:15:43_0a365c3e6a0e174302debff4023182838607acf1/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-24_13:15:43_0a365c3e6a0e174302debff4023182838607acf1/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index b5f11c1365d94556fd09fa6c3c000533bf3db8b7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_13:15:43_0a365c3e6a0e174302debff4023182838607acf1/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.086208,0.00449,223.0,0.495,202.0 diff --git a/raw_results/2023-08-24_13:15:43_0a365c3e6a0e174302debff4023182838607acf1/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-24_13:15:43_0a365c3e6a0e174302debff4023182838607acf1/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index faca206857aa0797341893f1d9750679549a8fb8..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-24_13:15:43_0a365c3e6a0e174302debff4023182838607acf1/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-24 14:50:04,469][benchmark][INFO] - Configuring inference benchmark -[2023-08-24 14:50:04,470][benchmark][INFO] - + Setting seed(42) -[2023-08-24 14:50:06,037][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-24 14:50:06,037][backend][INFO] - Configuring pytorch backend -[2023-08-24 14:50:06,037][backend][INFO] - + Checking initial device isolation -[2023-08-24 14:50:06,037][backend][INFO] - + Checking contineous device isolation -[2023-08-24 14:50:06,038][pytorch][INFO] - + Disabling gradients -[2023-08-24 14:50:06,038][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-24 14:50:06,723][pytorch][INFO] - + Turning on eval mode -[2023-08-24 14:50:06,723][inference][INFO] - Running inference benchmark -[2023-08-24 14:50:06,922][inference][INFO] - + Tracking forward pass peak memory -[2023-08-24 14:50:06,970][inference][INFO] - + Forward pass peak memory: 469.086208 (MB) -[2023-08-24 14:50:06,972][inference][INFO] - + Warming up the forward pass -[2023-08-24 14:50:07,004][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-24 14:50:12,046][inference][INFO] - + Forward pass latency: 4.49e-03 (s) -[2023-08-24 14:50:12,047][inference][INFO] - + Forward pass throughput: 223.00 (samples/s) -[2023-08-24 14:50:12,048][inference][INFO] - + Warming up the generation pass -[2023-08-24 14:50:12,642][inference][INFO] - + Tracking generation latency and throughput -[2023-08-24 14:50:18,085][inference][INFO] - + Generation pass latency: 4.95e-01 (s) -[2023-08-24 14:50:18,086][inference][INFO] - + Generation pass throughput: 202.00 (tokens/s) -[2023-08-24 14:50:18,086][inference][INFO] - Saving inference results -[2023-08-24 14:50:18,097][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-24_14:18:39_fecf08560cd9843b569279dd6f665c987890af4c/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-24_14:18:39_fecf08560cd9843b569279dd6f665c987890af4c/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_14:18:39_fecf08560cd9843b569279dd6f665c987890af4c/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_14:18:39_fecf08560cd9843b569279dd6f665c987890af4c/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-24_14:18:39_fecf08560cd9843b569279dd6f665c987890af4c/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 2cb3bfa3c7a0b0b04722ff4b1715ebd19d4116f4..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_14:18:39_fecf08560cd9843b569279dd6f665c987890af4c/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
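The sweeper block above (`benchmark.input_shapes.batch_size: 1,4`) is what fans each experiment out into the numbered job directories `0` and `1`, whose overrides.yaml files each hold exactly one `batch_size=...` override. A stand-in for what Hydra's BasicSweeper does with that comma-separated grid — observable behavior only, not Hydra's actual implementation:

```python
# Stand-in for Hydra's BasicSweeper grid expansion: every comma-separated
# value becomes one job's override list (job 0, job 1, ... in sweep order).
from itertools import product

def expand_sweep(params: dict[str, str]) -> list[list[str]]:
    axes = [[f"{key}={v}" for v in values.split(",")] for key, values in params.items()]
    return [list(combo) for combo in product(*axes)]

print(expand_sweep({"benchmark.input_shapes.batch_size": "1,4"}))
# [['benchmark.input_shapes.batch_size=1'], ['benchmark.input_shapes.batch_size=4']]
```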
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-24_14:18:39_fecf08560cd9843b569279dd6f665c987890af4c/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-24_14:18:39_fecf08560cd9843b569279dd6f665c987890af4c/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-24_14:18:39_fecf08560cd9843b569279dd6f665c987890af4c/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_14:18:39_fecf08560cd9843b569279dd6f665c987890af4c/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-24_14:18:39_fecf08560cd9843b569279dd6f665c987890af4c/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-24_14:18:39_fecf08560cd9843b569279dd6f665c987890af4c/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_14:18:39_fecf08560cd9843b569279dd6f665c987890af4c/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_14:18:39_fecf08560cd9843b569279dd6f665c987890af4c/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-24_14:18:39_fecf08560cd9843b569279dd6f665c987890af4c/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index ab24fe6ab57e1b11139485daf125cd351b2231d2..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_14:18:39_fecf08560cd9843b569279dd6f665c987890af4c/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.620416,0.00349,287.0 diff --git a/raw_results/2023-08-24_14:18:39_fecf08560cd9843b569279dd6f665c987890af4c/pytorch_bert_inference/0/main.log b/raw_results/2023-08-24_14:18:39_fecf08560cd9843b569279dd6f665c987890af4c/pytorch_bert_inference/0/main.log deleted file mode 100644 index ff0d936e85330c135d1a8c6f3f04902922be3bcd..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_14:18:39_fecf08560cd9843b569279dd6f665c987890af4c/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-24 14:51:24,722][benchmark][INFO] - Configuring inference benchmark -[2023-08-24 14:51:24,723][benchmark][INFO] - + Setting seed(42) -[2023-08-24 14:51:26,018][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-24 14:51:26,019][backend][INFO] - Configuring pytorch backend -[2023-08-24 14:51:26,019][backend][INFO] - + Checking initial device isolation -[2023-08-24 14:51:26,019][backend][INFO] - + Checking contineous device isolation -[2023-08-24 14:51:26,019][pytorch][INFO] - + Disabling gradients -[2023-08-24 14:51:26,019][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-24 14:51:26,641][pytorch][INFO] - + Turning on eval mode -[2023-08-24 14:51:26,642][inference][INFO] - Running inference benchmark -[2023-08-24 14:51:26,766][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-24 14:51:26,767][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-24 14:51:26,829][inference][INFO] - + Forward pass peak memory: 466.620416 (MB) -[2023-08-24 14:51:26,830][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-24 14:51:26,832][inference][INFO] - + Warming up the forward pass -[2023-08-24 14:51:26,879][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-24 14:51:31,925][inference][INFO] - + Forward pass latency: 3.49e-03 (s) -[2023-08-24 14:51:31,927][inference][INFO] - + Forward pass throughput: 287.00 (samples/s) -[2023-08-24 14:51:31,927][inference][INFO] - Saving inference results -[2023-08-24 14:51:31,937][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-24_14:18:39_fecf08560cd9843b569279dd6f665c987890af4c/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-24_14:18:39_fecf08560cd9843b569279dd6f665c987890af4c/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_14:18:39_fecf08560cd9843b569279dd6f665c987890af4c/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_14:18:39_fecf08560cd9843b569279dd6f665c987890af4c/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-24_14:18:39_fecf08560cd9843b569279dd6f665c987890af4c/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index a0b64f8746ea335fceac0403744b008473fbf156..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_14:18:39_fecf08560cd9843b569279dd6f665c987890af4c/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-24_14:18:39_fecf08560cd9843b569279dd6f665c987890af4c/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-24_14:18:39_fecf08560cd9843b569279dd6f665c987890af4c/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-08-24_14:18:39_fecf08560cd9843b569279dd6f665c987890af4c/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_14:18:39_fecf08560cd9843b569279dd6f665c987890af4c/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-24_14:18:39_fecf08560cd9843b569279dd6f665c987890af4c/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-24_14:18:39_fecf08560cd9843b569279dd6f665c987890af4c/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_14:18:39_fecf08560cd9843b569279dd6f665c987890af4c/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_14:18:39_fecf08560cd9843b569279dd6f665c987890af4c/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-24_14:18:39_fecf08560cd9843b569279dd6f665c987890af4c/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 3dfa67d46c9b2396724d7cb6eb024a6b50e8223b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_14:18:39_fecf08560cd9843b569279dd6f665c987890af4c/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.61983999999995,0.00351,1140.0 diff --git a/raw_results/2023-08-24_14:18:39_fecf08560cd9843b569279dd6f665c987890af4c/pytorch_bert_inference/1/main.log b/raw_results/2023-08-24_14:18:39_fecf08560cd9843b569279dd6f665c987890af4c/pytorch_bert_inference/1/main.log deleted file mode 100644 index 38ff1a2dc4612b040030b8c773835c6b42dd03aa..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-24_14:18:39_fecf08560cd9843b569279dd6f665c987890af4c/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-24 14:51:32,311][benchmark][INFO] - Configuring inference benchmark -[2023-08-24 14:51:32,312][benchmark][INFO] - + Setting seed(42) -[2023-08-24 14:51:32,765][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-24 14:51:32,765][backend][INFO] - Configuring pytorch backend -[2023-08-24 14:51:32,765][backend][INFO] - + Checking initial device isolation -[2023-08-24 14:51:32,766][backend][INFO] - + Checking contineous device isolation -[2023-08-24 14:51:32,766][pytorch][INFO] - + Disabling gradients -[2023-08-24 14:51:32,766][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-24 14:51:32,885][pytorch][INFO] - + Turning on eval mode -[2023-08-24 14:51:32,886][inference][INFO] - Running inference benchmark -[2023-08-24 14:51:33,007][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-24 14:51:33,009][inference][INFO] - + Tracking forward pass peak memory -[2023-08-24 14:51:33,056][inference][INFO] - + Forward pass peak memory: 467.61983999999995 (MB) -[2023-08-24 14:51:33,057][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-24 14:51:33,059][inference][INFO] - + Warming up the forward pass -[2023-08-24 14:51:33,096][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-24 14:51:38,140][inference][INFO] - + Forward pass latency: 3.51e-03 (s) -[2023-08-24 14:51:38,141][inference][INFO] - + Forward pass throughput: 1140.00 (samples/s) -[2023-08-24 14:51:38,141][inference][INFO] - Saving inference results -[2023-08-24 14:51:38,149][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-24_14:18:39_fecf08560cd9843b569279dd6f665c987890af4c/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-24_14:18:39_fecf08560cd9843b569279dd6f665c987890af4c/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_14:18:39_fecf08560cd9843b569279dd6f665c987890af4c/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference 
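Each job directory carries a two-line inference_results.csv like the ones deleted above, with metric columns named `phase.metric(unit)`. Parsing one is plain stdlib work; the CSV text below is copied verbatim from the bert batch-size-4 run in this commit:

```python
# Parse one of the inference_results.csv payloads from this diff.
import csv
import io

CSV_TEXT = """,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s)
0,467.61983999999995,0.00351,1140.0
"""

row = next(csv.DictReader(io.StringIO(CSV_TEXT)))
print(float(row["forward.latency(s)"]))             # 0.00351
print(float(row["forward.throughput(samples/s)"]))  # 1140.0
```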
-model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_14:18:39_fecf08560cd9843b569279dd6f665c987890af4c/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-24_14:18:39_fecf08560cd9843b569279dd6f665c987890af4c/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 4fc59cf9acf294d14e0a3bbe094b557ad690ac2d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_14:18:39_fecf08560cd9843b569279dd6f665c987890af4c/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
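The reported throughputs are consistent with throughput = batch_size / latency for the forward pass and (batch_size * new_tokens) / latency for generation, rounded to roughly three significant figures; that relationship is inferred from the numbers in these CSVs, not from documentation. A quick cross-check against the gpt2 run in this commit (batch_size 1, new_tokens 100):

```python
# Cross-check of the gpt2 numbers in this commit's CSV (0.00383 s / 261 samples/s,
# 0.497 s / 201 tokens/s); the formulas are inferred from the data, not quoted docs.
batch_size, new_tokens = 1, 100

forward_latency = 0.00383   # seconds
generate_latency = 0.497    # seconds

print(round(batch_size / forward_latency))                # 261 samples/s
print(round(batch_size * new_tokens / generate_latency))  # 201 tokens/s
```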
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-24_14:18:39_fecf08560cd9843b569279dd6f665c987890af4c/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-24_14:18:39_fecf08560cd9843b569279dd6f665c987890af4c/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-24_14:18:39_fecf08560cd9843b569279dd6f665c987890af4c/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_14:18:39_fecf08560cd9843b569279dd6f665c987890af4c/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-24_14:18:39_fecf08560cd9843b569279dd6f665c987890af4c/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-24_14:18:39_fecf08560cd9843b569279dd6f665c987890af4c/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_14:18:39_fecf08560cd9843b569279dd6f665c987890af4c/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_14:18:39_fecf08560cd9843b569279dd6f665c987890af4c/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-24_14:18:39_fecf08560cd9843b569279dd6f665c987890af4c/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index b49f552f0129f63ed13988539dc491100500be89..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_14:18:39_fecf08560cd9843b569279dd6f665c987890af4c/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,468.692992,0.00383,261.0,0.497,201.0 diff --git a/raw_results/2023-08-24_14:18:39_fecf08560cd9843b569279dd6f665c987890af4c/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-24_14:18:39_fecf08560cd9843b569279dd6f665c987890af4c/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 432bdec6682b2d3f83a939a6a5cbe3e247cc516b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_14:18:39_fecf08560cd9843b569279dd6f665c987890af4c/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-24 14:51:42,972][benchmark][INFO] - Configuring inference benchmark -[2023-08-24 14:51:42,973][benchmark][INFO] - + Setting seed(42) -[2023-08-24 14:51:44,494][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-24 14:51:44,494][backend][INFO] - Configuring pytorch backend -[2023-08-24 14:51:44,495][backend][INFO] - + Checking initial device isolation -[2023-08-24 14:51:44,495][backend][INFO] - + Checking contineous device isolation -[2023-08-24 14:51:44,495][pytorch][INFO] - + Disabling gradients -[2023-08-24 14:51:44,495][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-24 14:51:45,227][pytorch][INFO] - + Turning on eval mode -[2023-08-24 14:51:45,227][inference][INFO] - Running inference benchmark -[2023-08-24 14:51:45,441][inference][INFO] - + Tracking forward pass peak memory -[2023-08-24 14:51:45,492][inference][INFO] - + Forward pass peak memory: 468.692992 (MB) -[2023-08-24 14:51:45,493][inference][INFO] - + Warming up the forward pass 
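Since every run follows the raw_results/&lt;commit-date&gt;_&lt;sha&gt;/&lt;experiment&gt;/&lt;job&gt;/ layout visible in these paths, the scattered CSVs can be aggregated with a short directory walk. The layout is inferred from this diff, and the helper below is a hypothetical convenience, not an optimum-benchmark API:

```python
# Hypothetical aggregation helper over the raw_results layout seen in this
# diff: raw_results/<date>_<sha>/<experiment>/<job>/inference_results.csv.
import csv
from pathlib import Path

def collect_results(root: Path) -> list[dict]:
    rows: list[dict] = []
    for path in sorted(root.glob("*/*/*/inference_results.csv")):
        commit, experiment, job = path.parts[-4], path.parts[-3], path.parts[-2]
        with path.open(newline="") as handle:
            for row in csv.DictReader(handle):
                row.update(commit=commit, experiment=experiment, job=job)
                rows.append(row)
    return rows

if __name__ == "__main__":
    for row in collect_results(Path("raw_results")):
        print(row["commit"], row["experiment"], row["job"], row["forward.latency(s)"])
```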
-[2023-08-24 14:51:45,526][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-24 14:51:50,571][inference][INFO] - + Forward pass latency: 3.83e-03 (s) -[2023-08-24 14:51:50,573][inference][INFO] - + Forward pass throughput: 261.00 (samples/s) -[2023-08-24 14:51:50,573][inference][INFO] - + Warming up the generation pass -[2023-08-24 14:51:51,144][inference][INFO] - + Tracking generation latency and throughput -[2023-08-24 14:51:56,613][inference][INFO] - + Generation pass latency: 4.97e-01 (s) -[2023-08-24 14:51:56,614][inference][INFO] - + Generation pass throughput: 201.00 (tokens/s) -[2023-08-24 14:51:56,614][inference][INFO] - Saving inference results -[2023-08-24 14:51:56,626][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-24_15:33:14_7a6efe1e9f756f585f2ffe5ada22cf6b15edd23b/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-24_15:33:14_7a6efe1e9f756f585f2ffe5ada22cf6b15edd23b/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_15:33:14_7a6efe1e9f756f585f2ffe5ada22cf6b15edd23b/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_15:33:14_7a6efe1e9f756f585f2ffe5ada22cf6b15edd23b/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-24_15:33:14_7a6efe1e9f756f585f2ffe5ada22cf6b15edd23b/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 3b85b1ac77995ff2d23566dac4bea332c5f87435..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_15:33:14_7a6efe1e9f756f585f2ffe5ada22cf6b15edd23b/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-24_15:33:14_7a6efe1e9f756f585f2ffe5ada22cf6b15edd23b/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-24_15:33:14_7a6efe1e9f756f585f2ffe5ada22cf6b15edd23b/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-24_15:33:14_7a6efe1e9f756f585f2ffe5ada22cf6b15edd23b/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_15:33:14_7a6efe1e9f756f585f2ffe5ada22cf6b15edd23b/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-24_15:33:14_7a6efe1e9f756f585f2ffe5ada22cf6b15edd23b/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-24_15:33:14_7a6efe1e9f756f585f2ffe5ada22cf6b15edd23b/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_15:33:14_7a6efe1e9f756f585f2ffe5ada22cf6b15edd23b/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_15:33:14_7a6efe1e9f756f585f2ffe5ada22cf6b15edd23b/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-24_15:33:14_7a6efe1e9f756f585f2ffe5ada22cf6b15edd23b/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 832596466df8bcae5af205b4d27ebbf6c8a58e89..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_15:33:14_7a6efe1e9f756f585f2ffe5ada22cf6b15edd23b/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.218432,0.00312,321.0 diff --git a/raw_results/2023-08-24_15:33:14_7a6efe1e9f756f585f2ffe5ada22cf6b15edd23b/pytorch_bert_inference/0/main.log b/raw_results/2023-08-24_15:33:14_7a6efe1e9f756f585f2ffe5ada22cf6b15edd23b/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
0c8bcf7a0938afa97f37595b751fdc1e0c7b768a..0000000000000000000000000000000000000000
--- a/raw_results/2023-08-24_15:33:14_7a6efe1e9f756f585f2ffe5ada22cf6b15edd23b/pytorch_bert_inference/0/main.log
+++ /dev/null
@@ -1,20 +0,0 @@
-[2023-08-24 16:50:10,272][benchmark][INFO] - Configuring inference benchmark
-[2023-08-24 16:50:10,273][benchmark][INFO] - + Setting seed(42)
-[2023-08-24 16:50:11,534][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert
-[2023-08-24 16:50:11,534][backend][INFO] - Configuring pytorch backend
-[2023-08-24 16:50:11,534][backend][INFO] - + Checking initial device isolation
-[2023-08-24 16:50:11,535][backend][INFO] - + Checking contineous device isolation
-[2023-08-24 16:50:11,535][pytorch][INFO] - + Disabling gradients
-[2023-08-24 16:50:11,535][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu
-[2023-08-24 16:50:12,154][pytorch][INFO] - + Turning on eval mode
-[2023-08-24 16:50:12,154][inference][INFO] - Running inference benchmark
-[2023-08-24 16:50:12,278][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids']
-[2023-08-24 16:50:12,279][inference][INFO] - + Tracking forward pass peak memory
-[2023-08-24 16:50:12,344][inference][INFO] - + Forward pass peak memory: 467.218432 (MB)
-[2023-08-24 16:50:12,345][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids']
-[2023-08-24 16:50:12,347][inference][INFO] - + Warming up the forward pass
-[2023-08-24 16:50:12,392][inference][INFO] - + Tracking forward pass latency and throughput
-[2023-08-24 16:50:17,444][inference][INFO] - + Forward pass latency: 3.12e-03 (s)
-[2023-08-24 16:50:17,445][inference][INFO] - + Forward pass throughput: 321.00 (samples/s)
-[2023-08-24 16:50:17,445][inference][INFO] - Saving inference results
-[2023-08-24 16:50:17,455][backend][INFO] - Cleaning backend
diff --git a/raw_results/2023-08-24_15:33:14_7a6efe1e9f756f585f2ffe5ada22cf6b15edd23b/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-24_15:33:14_7a6efe1e9f756f585f2ffe5ada22cf6b15edd23b/pytorch_bert_inference/1/.config/config.yaml
deleted file mode 100644
index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000
--- a/raw_results/2023-08-24_15:33:14_7a6efe1e9f756f585f2ffe5ada22cf6b15edd23b/pytorch_bert_inference/1/.config/config.yaml
+++ /dev/null
@@ -1,66 +0,0 @@
-backend:
-  name: pytorch
-  version: 2.0.1+cu117
-  _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
-  inter_op_num_threads: null
-  intra_op_num_threads: null
-  initial_isolation_check: true
-  continous_isolation_check: true
-  delete_cache: false
-  no_weights: false
-  torch_dtype: null
-  device_map: null
-  load_in_8bit: false
-  load_in_4bit: false
-  bettertransformer: false
-  torch_compile: false
-  torch_compile_config:
-    fullgraph: false
-    dynamic: false
-    backend: inductor
-    mode: null
-    options: null
-    disable: false
-  amp_autocast: false
-  amp_dtype: null
-  disable_grad: ${is_inference:${benchmark.name}}
-  eval_mode: ${is_inference:${benchmark.name}}
-benchmark:
-  name: inference
-  _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
-  seed: 42
-  memory: true
-  warmup_runs: 10
-  benchmark_duration: 5
-  input_shapes:
-    batch_size: 4
-    sequence_length: 16
-    num_choices: 1
-    width: 64
-    height: 64
-    num_channels: 3
-    point_batch_size: 3
-    nb_points_per_image: 2
-    feature_size: 80
-    nb_max_frames: 3000
-
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_15:33:14_7a6efe1e9f756f585f2ffe5ada22cf6b15edd23b/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-24_15:33:14_7a6efe1e9f756f585f2ffe5ada22cf6b15edd23b/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 47c5fdbaef06cbdc8710c4f033529a81abb93805..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_15:33:14_7a6efe1e9f756f585f2ffe5ada22cf6b15edd23b/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-24_15:33:14_7a6efe1e9f756f585f2ffe5ada22cf6b15edd23b/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-24_15:33:14_7a6efe1e9f756f585f2ffe5ada22cf6b15edd23b/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-24_15:33:14_7a6efe1e9f756f585f2ffe5ada22cf6b15edd23b/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_15:33:14_7a6efe1e9f756f585f2ffe5ada22cf6b15edd23b/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-24_15:33:14_7a6efe1e9f756f585f2ffe5ada22cf6b15edd23b/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-24_15:33:14_7a6efe1e9f756f585f2ffe5ada22cf6b15edd23b/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_15:33:14_7a6efe1e9f756f585f2ffe5ada22cf6b15edd23b/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_15:33:14_7a6efe1e9f756f585f2ffe5ada22cf6b15edd23b/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-24_15:33:14_7a6efe1e9f756f585f2ffe5ada22cf6b15edd23b/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 74f02c354e04b14301ed2858a1574870fecf9e8f..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_15:33:14_7a6efe1e9f756f585f2ffe5ada22cf6b15edd23b/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.31615999999997,0.00337,1190.0 diff --git a/raw_results/2023-08-24_15:33:14_7a6efe1e9f756f585f2ffe5ada22cf6b15edd23b/pytorch_bert_inference/1/main.log b/raw_results/2023-08-24_15:33:14_7a6efe1e9f756f585f2ffe5ada22cf6b15edd23b/pytorch_bert_inference/1/main.log deleted file mode 100644 index 22d3f406eae0599e89968ca7d7a619f7c964cefe..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_15:33:14_7a6efe1e9f756f585f2ffe5ada22cf6b15edd23b/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-24 16:50:17,825][benchmark][INFO] - Configuring inference benchmark -[2023-08-24 16:50:17,826][benchmark][INFO] - + Setting seed(42) -[2023-08-24 16:50:18,270][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-24 16:50:18,271][backend][INFO] - Configuring pytorch backend -[2023-08-24 16:50:18,271][backend][INFO] - + Checking initial device isolation -[2023-08-24 16:50:18,271][backend][INFO] - + Checking contineous device isolation -[2023-08-24 16:50:18,271][pytorch][INFO] - + Disabling gradients -[2023-08-24 16:50:18,271][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-24 16:50:18,391][pytorch][INFO] - + Turning on eval mode -[2023-08-24 16:50:18,391][inference][INFO] - Running inference benchmark -[2023-08-24 16:50:18,513][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-24 16:50:18,514][inference][INFO] - + Tracking forward 
pass peak memory -[2023-08-24 16:50:18,555][inference][INFO] - + Forward pass peak memory: 468.31615999999997 (MB) -[2023-08-24 16:50:18,556][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-24 16:50:18,557][inference][INFO] - + Warming up the forward pass -[2023-08-24 16:50:18,592][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-24 16:50:23,639][inference][INFO] - + Forward pass latency: 3.37e-03 (s) -[2023-08-24 16:50:23,640][inference][INFO] - + Forward pass throughput: 1190.00 (samples/s) -[2023-08-24 16:50:23,641][inference][INFO] - Saving inference results -[2023-08-24 16:50:23,648][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-24_15:33:14_7a6efe1e9f756f585f2ffe5ada22cf6b15edd23b/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-24_15:33:14_7a6efe1e9f756f585f2ffe5ada22cf6b15edd23b/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_15:33:14_7a6efe1e9f756f585f2ffe5ada22cf6b15edd23b/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_15:33:14_7a6efe1e9f756f585f2ffe5ada22cf6b15edd23b/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-24_15:33:14_7a6efe1e9f756f585f2ffe5ada22cf6b15edd23b/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 7c0a2a6bbce8588175c0ec473c3aa1640a5eb5e2..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_15:33:14_7a6efe1e9f756f585f2ffe5ada22cf6b15edd23b/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - 
launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-24_15:33:14_7a6efe1e9f756f585f2ffe5ada22cf6b15edd23b/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-24_15:33:14_7a6efe1e9f756f585f2ffe5ada22cf6b15edd23b/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-24_15:33:14_7a6efe1e9f756f585f2ffe5ada22cf6b15edd23b/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_15:33:14_7a6efe1e9f756f585f2ffe5ada22cf6b15edd23b/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-24_15:33:14_7a6efe1e9f756f585f2ffe5ada22cf6b15edd23b/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-24_15:33:14_7a6efe1e9f756f585f2ffe5ada22cf6b15edd23b/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_15:33:14_7a6efe1e9f756f585f2ffe5ada22cf6b15edd23b/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_15:33:14_7a6efe1e9f756f585f2ffe5ada22cf6b15edd23b/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-24_15:33:14_7a6efe1e9f756f585f2ffe5ada22cf6b15edd23b/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 064b48fb754392b797d579920a7bef265624b101..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_15:33:14_7a6efe1e9f756f585f2ffe5ada22cf6b15edd23b/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,468.76671999999996,0.00399,251.0,0.588,170.0 diff --git a/raw_results/2023-08-24_15:33:14_7a6efe1e9f756f585f2ffe5ada22cf6b15edd23b/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-24_15:33:14_7a6efe1e9f756f585f2ffe5ada22cf6b15edd23b/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index be61203c62462c71ddd70fa7c4f3b62a9d1261ef..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-24_15:33:14_7a6efe1e9f756f585f2ffe5ada22cf6b15edd23b/pytorch_gpt2_inference/0/main.log
+++ /dev/null
@@ -1,22 +0,0 @@
-[2023-08-24 16:50:28,582][benchmark][INFO] - Configuring inference benchmark
-[2023-08-24 16:50:28,582][benchmark][INFO] - + Setting seed(42)
-[2023-08-24 16:50:30,099][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2
-[2023-08-24 16:50:30,099][backend][INFO] - Configuring pytorch backend
-[2023-08-24 16:50:30,100][backend][INFO] - + Checking initial device isolation
-[2023-08-24 16:50:30,100][backend][INFO] - + Checking contineous device isolation
-[2023-08-24 16:50:30,100][pytorch][INFO] - + Disabling gradients
-[2023-08-24 16:50:30,100][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu
-[2023-08-24 16:50:30,741][pytorch][INFO] - + Turning on eval mode
-[2023-08-24 16:50:30,741][inference][INFO] - Running inference benchmark
-[2023-08-24 16:50:30,940][inference][INFO] - + Tracking forward pass peak memory
-[2023-08-24 16:50:30,988][inference][INFO] - + Forward pass peak memory: 468.76671999999996 (MB)
-[2023-08-24 16:50:30,990][inference][INFO] - + Warming up the forward pass
-[2023-08-24 16:50:31,027][inference][INFO] - + Tracking forward pass latency and throughput
-[2023-08-24 16:50:36,071][inference][INFO] - + Forward pass latency: 3.99e-03 (s)
-[2023-08-24 16:50:36,072][inference][INFO] - + Forward pass throughput: 251.00 (samples/s)
-[2023-08-24 16:50:36,073][inference][INFO] - + Warming up the generation pass
-[2023-08-24 16:50:36,663][inference][INFO] - + Tracking generation latency and throughput
-[2023-08-24 16:50:41,953][inference][INFO] - + Generation pass latency: 5.88e-01 (s)
-[2023-08-24 16:50:41,954][inference][INFO] - + Generation pass throughput: 170.00 (tokens/s)
-[2023-08-24 16:50:41,954][inference][INFO] - Saving inference results
-[2023-08-24 16:50:41,965][backend][INFO] - Cleaning backend
diff --git a/raw_results/2023-08-24_16:24:36_1b2381c46b834a89e447f7a01f0961c4e940d117/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-24_16:24:36_1b2381c46b834a89e447f7a01f0961c4e940d117/pytorch_bert_inference/0/.config/config.yaml
deleted file mode 100644
index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000
--- a/raw_results/2023-08-24_16:24:36_1b2381c46b834a89e447f7a01f0961c4e940d117/pytorch_bert_inference/0/.config/config.yaml
+++ /dev/null
@@ -1,66 +0,0 @@
-backend:
-  name: pytorch
-  version: 2.0.1+cu117
-  _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
-  inter_op_num_threads: null
-  intra_op_num_threads: null
-  initial_isolation_check: true
-  continous_isolation_check: true
-  delete_cache: false
-  no_weights: false
-  torch_dtype: null
-  device_map: null
-  load_in_8bit: false
-  load_in_4bit: false
-  bettertransformer: false
-  torch_compile: false
-  torch_compile_config:
-    fullgraph: false
-    dynamic: false
-    backend: inductor
-    mode: null
-    options: null
-    disable: false
-  amp_autocast: false
-  amp_dtype: null
-  disable_grad: ${is_inference:${benchmark.name}}
-  eval_mode: ${is_inference:${benchmark.name}}
-benchmark:
-  name: inference
-  _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
-  seed: 42
-  memory: true
-  warmup_runs: 10
-  benchmark_duration: 5
-  input_shapes:
-    batch_size: 1
-    sequence_length: 16
-    num_choices: 1
-    width: 64
-    height: 64
-    num_channels: 3
-    point_batch_size: 3
-    nb_points_per_image: 2
-    feature_size: 80
-    nb_max_frames: 3000
-
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_16:24:36_1b2381c46b834a89e447f7a01f0961c4e940d117/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-24_16:24:36_1b2381c46b834a89e447f7a01f0961c4e940d117/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 767b89620578aca713fc8e9b6aee13b01c437cdd..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_16:24:36_1b2381c46b834a89e447f7a01f0961c4e940d117/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-24_16:24:36_1b2381c46b834a89e447f7a01f0961c4e940d117/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-24_16:24:36_1b2381c46b834a89e447f7a01f0961c4e940d117/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-24_16:24:36_1b2381c46b834a89e447f7a01f0961c4e940d117/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_16:24:36_1b2381c46b834a89e447f7a01f0961c4e940d117/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-24_16:24:36_1b2381c46b834a89e447f7a01f0961c4e940d117/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-24_16:24:36_1b2381c46b834a89e447f7a01f0961c4e940d117/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_16:24:36_1b2381c46b834a89e447f7a01f0961c4e940d117/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_16:24:36_1b2381c46b834a89e447f7a01f0961c4e940d117/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-24_16:24:36_1b2381c46b834a89e447f7a01f0961c4e940d117/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 7a7cb88ad0553f4ba713d78c92d1d2359747657b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_16:24:36_1b2381c46b834a89e447f7a01f0961c4e940d117/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.51392,0.00384,260.0 diff --git a/raw_results/2023-08-24_16:24:36_1b2381c46b834a89e447f7a01f0961c4e940d117/pytorch_bert_inference/0/main.log b/raw_results/2023-08-24_16:24:36_1b2381c46b834a89e447f7a01f0961c4e940d117/pytorch_bert_inference/0/main.log deleted file mode 100644 index 5e064dbeaca53d61081daac840145a65931f456d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_16:24:36_1b2381c46b834a89e447f7a01f0961c4e940d117/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-24 16:51:50,286][benchmark][INFO] - Configuring inference benchmark -[2023-08-24 16:51:50,287][benchmark][INFO] - + Setting seed(42) -[2023-08-24 16:51:51,809][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-24 16:51:51,809][backend][INFO] - Configuring pytorch backend -[2023-08-24 16:51:51,809][backend][INFO] - + Checking initial device isolation -[2023-08-24 16:51:51,809][backend][INFO] - + Checking contineous device isolation -[2023-08-24 16:51:51,809][pytorch][INFO] - + Disabling gradients -[2023-08-24 16:51:51,810][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-24 16:51:52,438][pytorch][INFO] - + Turning on eval mode -[2023-08-24 16:51:52,438][inference][INFO] - Running inference benchmark -[2023-08-24 16:51:52,558][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-24 16:51:52,559][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-24 16:51:52,619][inference][INFO] - + Forward pass peak memory: 466.51392 (MB) -[2023-08-24 16:51:52,621][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-24 16:51:52,622][inference][INFO] - + Warming up the forward pass -[2023-08-24 16:51:52,656][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-24 16:51:57,704][inference][INFO] - + Forward pass latency: 3.84e-03 (s) -[2023-08-24 16:51:57,706][inference][INFO] - + Forward pass throughput: 260.00 (samples/s) -[2023-08-24 16:51:57,706][inference][INFO] - Saving inference results -[2023-08-24 16:51:57,716][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-24_16:24:36_1b2381c46b834a89e447f7a01f0961c4e940d117/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-24_16:24:36_1b2381c46b834a89e447f7a01f0961c4e940d117/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_16:24:36_1b2381c46b834a89e447f7a01f0961c4e940d117/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_16:24:36_1b2381c46b834a89e447f7a01f0961c4e940d117/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-24_16:24:36_1b2381c46b834a89e447f7a01f0961c4e940d117/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 8227dd4fe156dd2a9e5c52a74e6650242c2b8ca3..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_16:24:36_1b2381c46b834a89e447f7a01f0961c4e940d117/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-24_16:24:36_1b2381c46b834a89e447f7a01f0961c4e940d117/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-24_16:24:36_1b2381c46b834a89e447f7a01f0961c4e940d117/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-08-24_16:24:36_1b2381c46b834a89e447f7a01f0961c4e940d117/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_16:24:36_1b2381c46b834a89e447f7a01f0961c4e940d117/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-24_16:24:36_1b2381c46b834a89e447f7a01f0961c4e940d117/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-24_16:24:36_1b2381c46b834a89e447f7a01f0961c4e940d117/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_16:24:36_1b2381c46b834a89e447f7a01f0961c4e940d117/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_16:24:36_1b2381c46b834a89e447f7a01f0961c4e940d117/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-24_16:24:36_1b2381c46b834a89e447f7a01f0961c4e940d117/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 1d1c10e467a865d60e8fb36eba4a1322edaf02c2..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_16:24:36_1b2381c46b834a89e447f7a01f0961c4e940d117/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.525632,0.00451,887.0 diff --git a/raw_results/2023-08-24_16:24:36_1b2381c46b834a89e447f7a01f0961c4e940d117/pytorch_bert_inference/1/main.log b/raw_results/2023-08-24_16:24:36_1b2381c46b834a89e447f7a01f0961c4e940d117/pytorch_bert_inference/1/main.log deleted file mode 100644 index 6587752164a0f7783aae84c00ab9d9ffeefce4cd..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-24_16:24:36_1b2381c46b834a89e447f7a01f0961c4e940d117/pytorch_bert_inference/1/main.log
+++ /dev/null
@@ -1,20 +0,0 @@
-[2023-08-24 16:51:58,087][benchmark][INFO] - Configuring inference benchmark
-[2023-08-24 16:51:58,088][benchmark][INFO] - + Setting seed(42)
-[2023-08-24 16:51:58,530][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert
-[2023-08-24 16:51:58,531][backend][INFO] - Configuring pytorch backend
-[2023-08-24 16:51:58,531][backend][INFO] - + Checking initial device isolation
-[2023-08-24 16:51:58,531][backend][INFO] - + Checking contineous device isolation
-[2023-08-24 16:51:58,531][pytorch][INFO] - + Disabling gradients
-[2023-08-24 16:51:58,531][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu
-[2023-08-24 16:51:58,651][pytorch][INFO] - + Turning on eval mode
-[2023-08-24 16:51:58,651][inference][INFO] - Running inference benchmark
-[2023-08-24 16:51:58,779][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids']
-[2023-08-24 16:51:58,780][inference][INFO] - + Tracking forward pass peak memory
-[2023-08-24 16:51:58,821][inference][INFO] - + Forward pass peak memory: 467.525632 (MB)
-[2023-08-24 16:51:58,822][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids']
-[2023-08-24 16:51:58,824][inference][INFO] - + Warming up the forward pass
-[2023-08-24 16:51:58,867][inference][INFO] - + Tracking forward pass latency and throughput
-[2023-08-24 16:52:03,906][inference][INFO] - + Forward pass latency: 4.51e-03 (s)
-[2023-08-24 16:52:03,907][inference][INFO] - + Forward pass throughput: 887.00 (samples/s)
-[2023-08-24 16:52:03,907][inference][INFO] - Saving inference results
-[2023-08-24 16:52:03,914][backend][INFO] - Cleaning backend
diff --git a/raw_results/2023-08-24_16:24:36_1b2381c46b834a89e447f7a01f0961c4e940d117/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-24_16:24:36_1b2381c46b834a89e447f7a01f0961c4e940d117/pytorch_gpt2_inference/0/.config/config.yaml
deleted file mode 100644
index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000
--- a/raw_results/2023-08-24_16:24:36_1b2381c46b834a89e447f7a01f0961c4e940d117/pytorch_gpt2_inference/0/.config/config.yaml
+++ /dev/null
@@ -1,66 +0,0 @@
-backend:
-  name: pytorch
-  version: 2.0.1+cu117
-  _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
-  inter_op_num_threads: null
-  intra_op_num_threads: null
-  initial_isolation_check: true
-  continous_isolation_check: true
-  delete_cache: false
-  no_weights: false
-  torch_dtype: null
-  device_map: null
-  load_in_8bit: false
-  load_in_4bit: false
-  bettertransformer: false
-  torch_compile: false
-  torch_compile_config:
-    fullgraph: false
-    dynamic: false
-    backend: inductor
-    mode: null
-    options: null
-    disable: false
-  amp_autocast: false
-  amp_dtype: null
-  disable_grad: ${is_inference:${benchmark.name}}
-  eval_mode: ${is_inference:${benchmark.name}}
-benchmark:
-  name: inference
-  _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
-  seed: 42
-  memory: true
-  warmup_runs: 10
-  benchmark_duration: 5
-  input_shapes:
-    batch_size: 1
-    sequence_length: 16
-    num_choices: 1
-    width: 64
-    height: 64
-    num_channels: 3
-    point_batch_size: 3
-    nb_points_per_image: 2
-    feature_size: 80
-    nb_max_frames: 3000
-    audio_sequence_length: 16000
-    new_tokens: 100
-experiment_name: pytorch_gpt2_inference
-model:
hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_16:24:36_1b2381c46b834a89e447f7a01f0961c4e940d117/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-24_16:24:36_1b2381c46b834a89e447f7a01f0961c4e940d117/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 0343ae108de97a6f2d8b3fb5de41cfb8fc0af11e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_16:24:36_1b2381c46b834a89e447f7a01f0961c4e940d117/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-24_16:24:36_1b2381c46b834a89e447f7a01f0961c4e940d117/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-24_16:24:36_1b2381c46b834a89e447f7a01f0961c4e940d117/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-24_16:24:36_1b2381c46b834a89e447f7a01f0961c4e940d117/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_16:24:36_1b2381c46b834a89e447f7a01f0961c4e940d117/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-24_16:24:36_1b2381c46b834a89e447f7a01f0961c4e940d117/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-24_16:24:36_1b2381c46b834a89e447f7a01f0961c4e940d117/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_16:24:36_1b2381c46b834a89e447f7a01f0961c4e940d117/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_16:24:36_1b2381c46b834a89e447f7a01f0961c4e940d117/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-24_16:24:36_1b2381c46b834a89e447f7a01f0961c4e940d117/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 4f1c7ae38dff83dd6c924ddd3c8182db523a5779..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_16:24:36_1b2381c46b834a89e447f7a01f0961c4e940d117/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,468.897792,0.00303,330.0,0.485,206.0 diff --git a/raw_results/2023-08-24_16:24:36_1b2381c46b834a89e447f7a01f0961c4e940d117/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-24_16:24:36_1b2381c46b834a89e447f7a01f0961c4e940d117/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 0edd4be0e947faa1f79953f726b138de4ab742cd..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_16:24:36_1b2381c46b834a89e447f7a01f0961c4e940d117/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-24 16:52:08,940][benchmark][INFO] - Configuring inference benchmark -[2023-08-24 16:52:08,941][benchmark][INFO] - + Setting seed(42) -[2023-08-24 16:52:10,364][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-24 16:52:10,365][backend][INFO] - Configuring pytorch backend -[2023-08-24 16:52:10,365][backend][INFO] - + Checking initial device isolation -[2023-08-24 16:52:10,365][backend][INFO] - + Checking contineous device isolation -[2023-08-24 16:52:10,365][pytorch][INFO] - + Disabling gradients -[2023-08-24 16:52:10,365][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-24 16:52:11,162][pytorch][INFO] - + Turning on eval mode -[2023-08-24 16:52:11,163][inference][INFO] - Running inference benchmark -[2023-08-24 16:52:11,518][inference][INFO] - + Tracking forward pass peak memory -[2023-08-24 16:52:11,562][inference][INFO] - + Forward pass peak memory: 468.897792 (MB) -[2023-08-24 16:52:11,563][inference][INFO] - + Warming up the forward pass 
-[2023-08-24 16:52:11,597][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-24 16:52:16,650][inference][INFO] - + Forward pass latency: 3.03e-03 (s) -[2023-08-24 16:52:16,651][inference][INFO] - + Forward pass throughput: 330.00 (samples/s) -[2023-08-24 16:52:16,651][inference][INFO] - + Warming up the generation pass -[2023-08-24 16:52:17,138][inference][INFO] - + Tracking generation latency and throughput -[2023-08-24 16:52:22,479][inference][INFO] - + Generation pass latency: 4.85e-01 (s) -[2023-08-24 16:52:22,480][inference][INFO] - + Generation pass throughput: 206.00 (tokens/s) -[2023-08-24 16:52:22,480][inference][INFO] - Saving inference results -[2023-08-24 16:52:22,491][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-24_16:48:41_fd0b94fd7b0c00c68e2e9f054793287808e33608/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-24_16:48:41_fd0b94fd7b0c00c68e2e9f054793287808e33608/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_16:48:41_fd0b94fd7b0c00c68e2e9f054793287808e33608/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_16:48:41_fd0b94fd7b0c00c68e2e9f054793287808e33608/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-24_16:48:41_fd0b94fd7b0c00c68e2e9f054793287808e33608/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index cd67e8227994a933d84f7574fa993997802ff8e1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_16:48:41_fd0b94fd7b0c00c68e2e9f054793287808e33608/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-24_16:48:41_fd0b94fd7b0c00c68e2e9f054793287808e33608/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-24_16:48:41_fd0b94fd7b0c00c68e2e9f054793287808e33608/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-24_16:48:41_fd0b94fd7b0c00c68e2e9f054793287808e33608/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_16:48:41_fd0b94fd7b0c00c68e2e9f054793287808e33608/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-24_16:48:41_fd0b94fd7b0c00c68e2e9f054793287808e33608/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-24_16:48:41_fd0b94fd7b0c00c68e2e9f054793287808e33608/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_16:48:41_fd0b94fd7b0c00c68e2e9f054793287808e33608/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_16:48:41_fd0b94fd7b0c00c68e2e9f054793287808e33608/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-24_16:48:41_fd0b94fd7b0c00c68e2e9f054793287808e33608/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 46d16a2d12b335a160398c52ed0495e889a724fa..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_16:48:41_fd0b94fd7b0c00c68e2e9f054793287808e33608/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.919424,0.00359,279.0 diff --git a/raw_results/2023-08-24_16:48:41_fd0b94fd7b0c00c68e2e9f054793287808e33608/pytorch_bert_inference/0/main.log b/raw_results/2023-08-24_16:48:41_fd0b94fd7b0c00c68e2e9f054793287808e33608/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
0bcbf603a8ee3d5654c49d5a8eb3f5bea84dc2bf..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_16:48:41_fd0b94fd7b0c00c68e2e9f054793287808e33608/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-24 16:53:28,801][benchmark][INFO] - Configuring inference benchmark -[2023-08-24 16:53:28,802][benchmark][INFO] - + Setting seed(42) -[2023-08-24 16:53:30,073][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-24 16:53:30,073][backend][INFO] - Configuring pytorch backend -[2023-08-24 16:53:30,073][backend][INFO] - + Checking initial device isolation -[2023-08-24 16:53:30,074][backend][INFO] - + Checking contineous device isolation -[2023-08-24 16:53:30,074][pytorch][INFO] - + Disabling gradients -[2023-08-24 16:53:30,074][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-24 16:53:30,706][pytorch][INFO] - + Turning on eval mode -[2023-08-24 16:53:30,707][inference][INFO] - Running inference benchmark -[2023-08-24 16:53:30,826][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-24 16:53:30,828][inference][INFO] - + Tracking forward pass peak memory -[2023-08-24 16:53:30,888][inference][INFO] - + Forward pass peak memory: 466.919424 (MB) -[2023-08-24 16:53:30,889][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-24 16:53:30,891][inference][INFO] - + Warming up the forward pass -[2023-08-24 16:53:30,930][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-24 16:53:35,979][inference][INFO] - + Forward pass latency: 3.59e-03 (s) -[2023-08-24 16:53:35,981][inference][INFO] - + Forward pass throughput: 279.00 (samples/s) -[2023-08-24 16:53:35,981][inference][INFO] - Saving inference results -[2023-08-24 16:53:35,991][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-24_16:48:41_fd0b94fd7b0c00c68e2e9f054793287808e33608/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-24_16:48:41_fd0b94fd7b0c00c68e2e9f054793287808e33608/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_16:48:41_fd0b94fd7b0c00c68e2e9f054793287808e33608/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_16:48:41_fd0b94fd7b0c00c68e2e9f054793287808e33608/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-24_16:48:41_fd0b94fd7b0c00c68e2e9f054793287808e33608/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 23762e3d09e082627bd5c4b04a8347525e0cf41d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_16:48:41_fd0b94fd7b0c00c68e2e9f054793287808e33608/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-24_16:48:41_fd0b94fd7b0c00c68e2e9f054793287808e33608/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-24_16:48:41_fd0b94fd7b0c00c68e2e9f054793287808e33608/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-24_16:48:41_fd0b94fd7b0c00c68e2e9f054793287808e33608/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_16:48:41_fd0b94fd7b0c00c68e2e9f054793287808e33608/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-24_16:48:41_fd0b94fd7b0c00c68e2e9f054793287808e33608/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-24_16:48:41_fd0b94fd7b0c00c68e2e9f054793287808e33608/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_16:48:41_fd0b94fd7b0c00c68e2e9f054793287808e33608/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_16:48:41_fd0b94fd7b0c00c68e2e9f054793287808e33608/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-24_16:48:41_fd0b94fd7b0c00c68e2e9f054793287808e33608/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index bf9fb2ab980a0cec138747f8ffbfaaf7a4eaadaa..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_16:48:41_fd0b94fd7b0c00c68e2e9f054793287808e33608/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.94752,0.00354,1130.0 diff --git a/raw_results/2023-08-24_16:48:41_fd0b94fd7b0c00c68e2e9f054793287808e33608/pytorch_bert_inference/1/main.log b/raw_results/2023-08-24_16:48:41_fd0b94fd7b0c00c68e2e9f054793287808e33608/pytorch_bert_inference/1/main.log deleted file mode 100644 index 2b46de347b3369baced60a457a669517c75c860c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_16:48:41_fd0b94fd7b0c00c68e2e9f054793287808e33608/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-24 16:53:36,364][benchmark][INFO] - Configuring inference benchmark -[2023-08-24 16:53:36,364][benchmark][INFO] - + Setting seed(42) -[2023-08-24 16:53:36,812][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-24 16:53:36,812][backend][INFO] - Configuring pytorch backend -[2023-08-24 16:53:36,812][backend][INFO] - + Checking initial device isolation -[2023-08-24 16:53:36,812][backend][INFO] - + Checking contineous device isolation -[2023-08-24 16:53:36,812][pytorch][INFO] - + Disabling gradients -[2023-08-24 16:53:36,813][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-24 16:53:36,923][pytorch][INFO] - + Turning on eval mode -[2023-08-24 16:53:36,923][inference][INFO] - Running inference benchmark -[2023-08-24 16:53:37,043][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-24 16:53:37,044][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-24 16:53:37,084][inference][INFO] - + Forward pass peak memory: 467.94752 (MB) -[2023-08-24 16:53:37,085][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-24 16:53:37,086][inference][INFO] - + Warming up the forward pass -[2023-08-24 16:53:37,122][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-24 16:53:42,167][inference][INFO] - + Forward pass latency: 3.54e-03 (s) -[2023-08-24 16:53:42,168][inference][INFO] - + Forward pass throughput: 1130.00 (samples/s) -[2023-08-24 16:53:42,168][inference][INFO] - Saving inference results -[2023-08-24 16:53:42,175][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-24_16:48:41_fd0b94fd7b0c00c68e2e9f054793287808e33608/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-24_16:48:41_fd0b94fd7b0c00c68e2e9f054793287808e33608/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_16:48:41_fd0b94fd7b0c00c68e2e9f054793287808e33608/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_16:48:41_fd0b94fd7b0c00c68e2e9f054793287808e33608/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-24_16:48:41_fd0b94fd7b0c00c68e2e9f054793287808e33608/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 848e1fa5aced165270a4fe3b24182d97b871bba2..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_16:48:41_fd0b94fd7b0c00c68e2e9f054793287808e33608/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-24_16:48:41_fd0b94fd7b0c00c68e2e9f054793287808e33608/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-24_16:48:41_fd0b94fd7b0c00c68e2e9f054793287808e33608/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-24_16:48:41_fd0b94fd7b0c00c68e2e9f054793287808e33608/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_16:48:41_fd0b94fd7b0c00c68e2e9f054793287808e33608/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-24_16:48:41_fd0b94fd7b0c00c68e2e9f054793287808e33608/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-24_16:48:41_fd0b94fd7b0c00c68e2e9f054793287808e33608/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_16:48:41_fd0b94fd7b0c00c68e2e9f054793287808e33608/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_16:48:41_fd0b94fd7b0c00c68e2e9f054793287808e33608/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-24_16:48:41_fd0b94fd7b0c00c68e2e9f054793287808e33608/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index a62b60e8663996557435aee4b1522b82719c39c3..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_16:48:41_fd0b94fd7b0c00c68e2e9f054793287808e33608/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.10259199999996,0.00379,264.0,0.543,184.0 diff --git a/raw_results/2023-08-24_16:48:41_fd0b94fd7b0c00c68e2e9f054793287808e33608/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-24_16:48:41_fd0b94fd7b0c00c68e2e9f054793287808e33608/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 20a712c3a3cf68ea843cb0388ad799d3409b46f5..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-24_16:48:41_fd0b94fd7b0c00c68e2e9f054793287808e33608/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-24 16:53:47,054][benchmark][INFO] - Configuring inference benchmark -[2023-08-24 16:53:47,055][benchmark][INFO] - + Setting seed(42) -[2023-08-24 16:53:48,732][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-24 16:53:48,733][backend][INFO] - Configuring pytorch backend -[2023-08-24 16:53:48,733][backend][INFO] - + Checking initial device isolation -[2023-08-24 16:53:48,733][backend][INFO] - + Checking contineous device isolation -[2023-08-24 16:53:48,733][pytorch][INFO] - + Disabling gradients -[2023-08-24 16:53:48,734][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-24 16:53:49,397][pytorch][INFO] - + Turning on eval mode -[2023-08-24 16:53:49,398][inference][INFO] - Running inference benchmark -[2023-08-24 16:53:49,604][inference][INFO] - + Tracking forward pass peak memory -[2023-08-24 16:53:49,648][inference][INFO] - + Forward pass peak memory: 469.10259199999996 (MB) -[2023-08-24 16:53:49,650][inference][INFO] - + Warming up the forward pass -[2023-08-24 16:53:49,691][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-24 16:53:54,738][inference][INFO] - + Forward pass latency: 3.79e-03 (s) -[2023-08-24 16:53:54,739][inference][INFO] - + Forward pass throughput: 264.00 (samples/s) -[2023-08-24 16:53:54,740][inference][INFO] - + Warming up the generation pass -[2023-08-24 16:53:55,318][inference][INFO] - + Tracking generation latency and throughput -[2023-08-24 16:54:00,746][inference][INFO] - + Generation pass latency: 5.43e-01 (s) -[2023-08-24 16:54:00,747][inference][INFO] - + Generation pass throughput: 184.00 (tokens/s) -[2023-08-24 16:54:00,748][inference][INFO] - Saving inference results -[2023-08-24 16:54:00,758][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-24_16:58:37_021887682224daf29264f98c759a45e88c82e244/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-24_16:58:37_021887682224daf29264f98c759a45e88c82e244/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_16:58:37_021887682224daf29264f98c759a45e88c82e244/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_16:58:37_021887682224daf29264f98c759a45e88c82e244/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-24_16:58:37_021887682224daf29264f98c759a45e88c82e244/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index c20bc77c99419bf9910c37ff203736407a10c453..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_16:58:37_021887682224daf29264f98c759a45e88c82e244/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-24_16:58:37_021887682224daf29264f98c759a45e88c82e244/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-24_16:58:37_021887682224daf29264f98c759a45e88c82e244/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-24_16:58:37_021887682224daf29264f98c759a45e88c82e244/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_16:58:37_021887682224daf29264f98c759a45e88c82e244/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-24_16:58:37_021887682224daf29264f98c759a45e88c82e244/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-24_16:58:37_021887682224daf29264f98c759a45e88c82e244/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_16:58:37_021887682224daf29264f98c759a45e88c82e244/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_16:58:37_021887682224daf29264f98c759a45e88c82e244/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-24_16:58:37_021887682224daf29264f98c759a45e88c82e244/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index c6d25e0322ddb2e7a476741bbb92c11051b20bda..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_16:58:37_021887682224daf29264f98c759a45e88c82e244/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.357696,0.00322,311.0 diff --git a/raw_results/2023-08-24_16:58:37_021887682224daf29264f98c759a45e88c82e244/pytorch_bert_inference/0/main.log b/raw_results/2023-08-24_16:58:37_021887682224daf29264f98c759a45e88c82e244/pytorch_bert_inference/0/main.log deleted file mode 100644 index dbbf9799a56b87aafeded2dd53801a63b4e7fecd..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_16:58:37_021887682224daf29264f98c759a45e88c82e244/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-24 18:50:00,200][benchmark][INFO] - Configuring inference benchmark -[2023-08-24 18:50:00,202][benchmark][INFO] - + Setting seed(42) -[2023-08-24 18:50:01,423][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-24 18:50:01,424][backend][INFO] - Configuring pytorch backend -[2023-08-24 18:50:01,424][backend][INFO] - + Checking initial device isolation -[2023-08-24 18:50:01,424][backend][INFO] - + Checking contineous device isolation -[2023-08-24 18:50:01,424][pytorch][INFO] - + Disabling gradients -[2023-08-24 18:50:01,425][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-24 18:50:02,033][pytorch][INFO] - + Turning on eval mode -[2023-08-24 18:50:02,034][inference][INFO] - Running inference benchmark -[2023-08-24 18:50:02,159][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-24 18:50:02,161][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-24 18:50:02,219][inference][INFO] - + Forward pass peak memory: 467.357696 (MB) -[2023-08-24 18:50:02,221][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-24 18:50:02,222][inference][INFO] - + Warming up the forward pass -[2023-08-24 18:50:02,255][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-24 18:50:07,306][inference][INFO] - + Forward pass latency: 3.22e-03 (s) -[2023-08-24 18:50:07,307][inference][INFO] - + Forward pass throughput: 311.00 (samples/s) -[2023-08-24 18:50:07,308][inference][INFO] - Saving inference results -[2023-08-24 18:50:07,319][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-24_16:58:37_021887682224daf29264f98c759a45e88c82e244/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-24_16:58:37_021887682224daf29264f98c759a45e88c82e244/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_16:58:37_021887682224daf29264f98c759a45e88c82e244/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_16:58:37_021887682224daf29264f98c759a45e88c82e244/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-24_16:58:37_021887682224daf29264f98c759a45e88c82e244/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 07ed5bdfa794399903ecbdfad2e5d0bcb519faa9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_16:58:37_021887682224daf29264f98c759a45e88c82e244/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-24_16:58:37_021887682224daf29264f98c759a45e88c82e244/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-24_16:58:37_021887682224daf29264f98c759a45e88c82e244/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-08-24_16:58:37_021887682224daf29264f98c759a45e88c82e244/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_16:58:37_021887682224daf29264f98c759a45e88c82e244/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-24_16:58:37_021887682224daf29264f98c759a45e88c82e244/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-24_16:58:37_021887682224daf29264f98c759a45e88c82e244/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_16:58:37_021887682224daf29264f98c759a45e88c82e244/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_16:58:37_021887682224daf29264f98c759a45e88c82e244/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-24_16:58:37_021887682224daf29264f98c759a45e88c82e244/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 969852cd00b0ead5fcaa8184f66b7501f07f5b1b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_16:58:37_021887682224daf29264f98c759a45e88c82e244/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.41855999999996,0.00356,1120.0 diff --git a/raw_results/2023-08-24_16:58:37_021887682224daf29264f98c759a45e88c82e244/pytorch_bert_inference/1/main.log b/raw_results/2023-08-24_16:58:37_021887682224daf29264f98c759a45e88c82e244/pytorch_bert_inference/1/main.log deleted file mode 100644 index 4c1fcb8c2cfb2e5dd05985233798fdabb3d35d67..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-24_16:58:37_021887682224daf29264f98c759a45e88c82e244/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-24 18:50:07,711][benchmark][INFO] - Configuring inference benchmark -[2023-08-24 18:50:07,713][benchmark][INFO] - + Setting seed(42) -[2023-08-24 18:50:08,160][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-24 18:50:08,160][backend][INFO] - Configuring pytorch backend -[2023-08-24 18:50:08,160][backend][INFO] - + Checking initial device isolation -[2023-08-24 18:50:08,160][backend][INFO] - + Checking contineous device isolation -[2023-08-24 18:50:08,160][pytorch][INFO] - + Disabling gradients -[2023-08-24 18:50:08,161][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-24 18:50:08,269][pytorch][INFO] - + Turning on eval mode -[2023-08-24 18:50:08,270][inference][INFO] - Running inference benchmark -[2023-08-24 18:50:08,405][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-24 18:50:08,406][inference][INFO] - + Tracking forward pass peak memory -[2023-08-24 18:50:08,454][inference][INFO] - + Forward pass peak memory: 468.41855999999996 (MB) -[2023-08-24 18:50:08,455][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-24 18:50:08,457][inference][INFO] - + Warming up the forward pass -[2023-08-24 18:50:08,508][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-24 18:50:13,554][inference][INFO] - + Forward pass latency: 3.56e-03 (s) -[2023-08-24 18:50:13,556][inference][INFO] - + Forward pass throughput: 1120.00 (samples/s) -[2023-08-24 18:50:13,556][inference][INFO] - Saving inference results -[2023-08-24 18:50:13,564][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-24_16:58:37_021887682224daf29264f98c759a45e88c82e244/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-24_16:58:37_021887682224daf29264f98c759a45e88c82e244/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_16:58:37_021887682224daf29264f98c759a45e88c82e244/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference 
-model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_16:58:37_021887682224daf29264f98c759a45e88c82e244/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-24_16:58:37_021887682224daf29264f98c759a45e88c82e244/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 754f9c9c6005f7ed749ae53f406c9c47e47559e8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_16:58:37_021887682224daf29264f98c759a45e88c82e244/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-24_16:58:37_021887682224daf29264f98c759a45e88c82e244/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-24_16:58:37_021887682224daf29264f98c759a45e88c82e244/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-24_16:58:37_021887682224daf29264f98c759a45e88c82e244/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_16:58:37_021887682224daf29264f98c759a45e88c82e244/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-24_16:58:37_021887682224daf29264f98c759a45e88c82e244/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-24_16:58:37_021887682224daf29264f98c759a45e88c82e244/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_16:58:37_021887682224daf29264f98c759a45e88c82e244/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_16:58:37_021887682224daf29264f98c759a45e88c82e244/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-24_16:58:37_021887682224daf29264f98c759a45e88c82e244/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 8ca7628bea747c00b3a2f44a63f6eaa3382b41fb..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_16:58:37_021887682224daf29264f98c759a45e88c82e244/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,468.86911999999995,0.00386,259.0,0.511,196.0 diff --git a/raw_results/2023-08-24_16:58:37_021887682224daf29264f98c759a45e88c82e244/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-24_16:58:37_021887682224daf29264f98c759a45e88c82e244/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index ca7d2debdc0b48e5c3a905f2126833e717fb9e93..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_16:58:37_021887682224daf29264f98c759a45e88c82e244/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-24 18:50:18,706][benchmark][INFO] - Configuring inference benchmark -[2023-08-24 18:50:18,707][benchmark][INFO] - + Setting seed(42) -[2023-08-24 18:50:20,356][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-24 18:50:20,357][backend][INFO] - Configuring pytorch backend -[2023-08-24 18:50:20,357][backend][INFO] - + Checking initial device isolation -[2023-08-24 18:50:20,357][backend][INFO] - + Checking contineous device isolation -[2023-08-24 18:50:20,357][pytorch][INFO] - + Disabling gradients -[2023-08-24 18:50:20,357][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-24 18:50:21,002][pytorch][INFO] - + Turning on eval mode -[2023-08-24 18:50:21,002][inference][INFO] - Running inference benchmark -[2023-08-24 18:50:21,194][inference][INFO] - + Tracking forward pass peak memory -[2023-08-24 18:50:21,239][inference][INFO] - + Forward pass peak memory: 468.86911999999995 (MB) -[2023-08-24 18:50:21,240][inference][INFO] - + Warming up the 
forward pass -[2023-08-24 18:50:21,272][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-24 18:50:26,315][inference][INFO] - + Forward pass latency: 3.86e-03 (s) -[2023-08-24 18:50:26,317][inference][INFO] - + Forward pass throughput: 259.00 (samples/s) -[2023-08-24 18:50:26,317][inference][INFO] - + Warming up the generation pass -[2023-08-24 18:50:26,820][inference][INFO] - + Tracking generation latency and throughput -[2023-08-24 18:50:31,927][inference][INFO] - + Generation pass latency: 5.11e-01 (s) -[2023-08-24 18:50:31,927][inference][INFO] - + Generation pass throughput: 196.00 (tokens/s) -[2023-08-24 18:50:31,928][inference][INFO] - Saving inference results -[2023-08-24 18:50:31,939][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-24_18:14:58_f26099e7b5cf579f99a42bab6ddd371bf2c8d548/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-24_18:14:58_f26099e7b5cf579f99a42bab6ddd371bf2c8d548/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_18:14:58_f26099e7b5cf579f99a42bab6ddd371bf2c8d548/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_18:14:58_f26099e7b5cf579f99a42bab6ddd371bf2c8d548/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-24_18:14:58_f26099e7b5cf579f99a42bab6ddd371bf2c8d548/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 293874fb214ddb5a070bc2b60425bddf95f58fd4..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_18:14:58_f26099e7b5cf579f99a42bab6ddd371bf2c8d548/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-24_18:14:58_f26099e7b5cf579f99a42bab6ddd371bf2c8d548/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-24_18:14:58_f26099e7b5cf579f99a42bab6ddd371bf2c8d548/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-24_18:14:58_f26099e7b5cf579f99a42bab6ddd371bf2c8d548/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_18:14:58_f26099e7b5cf579f99a42bab6ddd371bf2c8d548/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-24_18:14:58_f26099e7b5cf579f99a42bab6ddd371bf2c8d548/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-24_18:14:58_f26099e7b5cf579f99a42bab6ddd371bf2c8d548/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_18:14:58_f26099e7b5cf579f99a42bab6ddd371bf2c8d548/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_18:14:58_f26099e7b5cf579f99a42bab6ddd371bf2c8d548/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-24_18:14:58_f26099e7b5cf579f99a42bab6ddd371bf2c8d548/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 8ba99c686b2fd7b8887682074b945d8d1586c9a9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_18:14:58_f26099e7b5cf579f99a42bab6ddd371bf2c8d548/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.304448,0.00322,311.0 diff --git a/raw_results/2023-08-24_18:14:58_f26099e7b5cf579f99a42bab6ddd371bf2c8d548/pytorch_bert_inference/0/main.log b/raw_results/2023-08-24_18:14:58_f26099e7b5cf579f99a42bab6ddd371bf2c8d548/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
d32e18f598841434dc2250e527a9c4e7c72dd8aa..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_18:14:58_f26099e7b5cf579f99a42bab6ddd371bf2c8d548/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-24 18:51:39,241][benchmark][INFO] - Configuring inference benchmark -[2023-08-24 18:51:39,242][benchmark][INFO] - + Setting seed(42) -[2023-08-24 18:51:40,448][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-24 18:51:40,449][backend][INFO] - Configuring pytorch backend -[2023-08-24 18:51:40,449][backend][INFO] - + Checking initial device isolation -[2023-08-24 18:51:40,449][backend][INFO] - + Checking contineous device isolation -[2023-08-24 18:51:40,449][pytorch][INFO] - + Disabling gradients -[2023-08-24 18:51:40,449][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-24 18:51:41,085][pytorch][INFO] - + Turning on eval mode -[2023-08-24 18:51:41,086][inference][INFO] - Running inference benchmark -[2023-08-24 18:51:41,201][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-24 18:51:41,202][inference][INFO] - + Tracking forward pass peak memory -[2023-08-24 18:51:41,261][inference][INFO] - + Forward pass peak memory: 467.304448 (MB) -[2023-08-24 18:51:41,262][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-24 18:51:41,264][inference][INFO] - + Warming up the forward pass -[2023-08-24 18:51:41,297][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-24 18:51:46,349][inference][INFO] - + Forward pass latency: 3.22e-03 (s) -[2023-08-24 18:51:46,350][inference][INFO] - + Forward pass throughput: 311.00 (samples/s) -[2023-08-24 18:51:46,350][inference][INFO] - Saving inference results -[2023-08-24 18:51:46,363][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-24_18:14:58_f26099e7b5cf579f99a42bab6ddd371bf2c8d548/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-24_18:14:58_f26099e7b5cf579f99a42bab6ddd371bf2c8d548/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_18:14:58_f26099e7b5cf579f99a42bab6ddd371bf2c8d548/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_18:14:58_f26099e7b5cf579f99a42bab6ddd371bf2c8d548/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-24_18:14:58_f26099e7b5cf579f99a42bab6ddd371bf2c8d548/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 5cabb5cda14744a4d510e4b10c9542733287f040..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_18:14:58_f26099e7b5cf579f99a42bab6ddd371bf2c8d548/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-24_18:14:58_f26099e7b5cf579f99a42bab6ddd371bf2c8d548/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-24_18:14:58_f26099e7b5cf579f99a42bab6ddd371bf2c8d548/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-24_18:14:58_f26099e7b5cf579f99a42bab6ddd371bf2c8d548/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_18:14:58_f26099e7b5cf579f99a42bab6ddd371bf2c8d548/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-24_18:14:58_f26099e7b5cf579f99a42bab6ddd371bf2c8d548/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-24_18:14:58_f26099e7b5cf579f99a42bab6ddd371bf2c8d548/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_18:14:58_f26099e7b5cf579f99a42bab6ddd371bf2c8d548/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_18:14:58_f26099e7b5cf579f99a42bab6ddd371bf2c8d548/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-24_18:14:58_f26099e7b5cf579f99a42bab6ddd371bf2c8d548/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index a0b7a42e31f30d0fe9617ae45ba450502f6b197e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_18:14:58_f26099e7b5cf579f99a42bab6ddd371bf2c8d548/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.3776,0.00358,1120.0 diff --git a/raw_results/2023-08-24_18:14:58_f26099e7b5cf579f99a42bab6ddd371bf2c8d548/pytorch_bert_inference/1/main.log b/raw_results/2023-08-24_18:14:58_f26099e7b5cf579f99a42bab6ddd371bf2c8d548/pytorch_bert_inference/1/main.log deleted file mode 100644 index 02021d4c2859864400f80cacb8889cfdb4107fd4..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_18:14:58_f26099e7b5cf579f99a42bab6ddd371bf2c8d548/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-24 18:51:46,755][benchmark][INFO] - Configuring inference benchmark -[2023-08-24 18:51:46,756][benchmark][INFO] - + Setting seed(42) -[2023-08-24 18:51:47,198][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-24 18:51:47,198][backend][INFO] - Configuring pytorch backend -[2023-08-24 18:51:47,198][backend][INFO] - + Checking initial device isolation -[2023-08-24 18:51:47,198][backend][INFO] - + Checking contineous device isolation -[2023-08-24 18:51:47,198][pytorch][INFO] - + Disabling gradients -[2023-08-24 18:51:47,198][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-24 18:51:47,315][pytorch][INFO] - + Turning on eval mode -[2023-08-24 18:51:47,316][inference][INFO] - Running inference benchmark -[2023-08-24 18:51:47,435][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-24 18:51:47,436][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-24 18:51:47,478][inference][INFO] - + Forward pass peak memory: 468.3776 (MB) -[2023-08-24 18:51:47,479][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-24 18:51:47,481][inference][INFO] - + Warming up the forward pass -[2023-08-24 18:51:47,519][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-24 18:51:52,565][inference][INFO] - + Forward pass latency: 3.58e-03 (s) -[2023-08-24 18:51:52,565][inference][INFO] - + Forward pass throughput: 1120.00 (samples/s) -[2023-08-24 18:51:52,566][inference][INFO] - Saving inference results -[2023-08-24 18:51:52,573][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-24_18:14:58_f26099e7b5cf579f99a42bab6ddd371bf2c8d548/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-24_18:14:58_f26099e7b5cf579f99a42bab6ddd371bf2c8d548/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_18:14:58_f26099e7b5cf579f99a42bab6ddd371bf2c8d548/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_18:14:58_f26099e7b5cf579f99a42bab6ddd371bf2c8d548/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-24_18:14:58_f26099e7b5cf579f99a42bab6ddd371bf2c8d548/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 66e6bff09b49fe513968b472c1632e4c8181de34..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_18:14:58_f26099e7b5cf579f99a42bab6ddd371bf2c8d548/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-24_18:14:58_f26099e7b5cf579f99a42bab6ddd371bf2c8d548/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-24_18:14:58_f26099e7b5cf579f99a42bab6ddd371bf2c8d548/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-24_18:14:58_f26099e7b5cf579f99a42bab6ddd371bf2c8d548/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_18:14:58_f26099e7b5cf579f99a42bab6ddd371bf2c8d548/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-24_18:14:58_f26099e7b5cf579f99a42bab6ddd371bf2c8d548/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-24_18:14:58_f26099e7b5cf579f99a42bab6ddd371bf2c8d548/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_18:14:58_f26099e7b5cf579f99a42bab6ddd371bf2c8d548/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-24_18:14:58_f26099e7b5cf579f99a42bab6ddd371bf2c8d548/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-24_18:14:58_f26099e7b5cf579f99a42bab6ddd371bf2c8d548/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 8b466d80b069c3e8caa8fcd9e76083e0a6c4d51a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-24_18:14:58_f26099e7b5cf579f99a42bab6ddd371bf2c8d548/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,468.88550399999997,0.00375,267.0,0.525,190.0 diff --git a/raw_results/2023-08-24_18:14:58_f26099e7b5cf579f99a42bab6ddd371bf2c8d548/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-24_18:14:58_f26099e7b5cf579f99a42bab6ddd371bf2c8d548/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 7355472e821d1969d2fd089c1cf5375570b154cf..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-24_18:14:58_f26099e7b5cf579f99a42bab6ddd371bf2c8d548/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-24 18:51:57,388][benchmark][INFO] - Configuring inference benchmark -[2023-08-24 18:51:57,389][benchmark][INFO] - + Setting seed(42) -[2023-08-24 18:51:58,865][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-24 18:51:58,865][backend][INFO] - Configuring pytorch backend -[2023-08-24 18:51:58,865][backend][INFO] - + Checking initial device isolation -[2023-08-24 18:51:58,865][backend][INFO] - + Checking contineous device isolation -[2023-08-24 18:51:58,866][pytorch][INFO] - + Disabling gradients -[2023-08-24 18:51:58,866][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-24 18:51:59,550][pytorch][INFO] - + Turning on eval mode -[2023-08-24 18:51:59,551][inference][INFO] - Running inference benchmark -[2023-08-24 18:51:59,770][inference][INFO] - + Tracking forward pass peak memory -[2023-08-24 18:51:59,822][inference][INFO] - + Forward pass peak memory: 468.88550399999997 (MB) -[2023-08-24 18:51:59,823][inference][INFO] - + Warming up the forward pass -[2023-08-24 18:51:59,856][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-24 18:52:04,902][inference][INFO] - + Forward pass latency: 3.75e-03 (s) -[2023-08-24 18:52:04,904][inference][INFO] - + Forward pass throughput: 267.00 (samples/s) -[2023-08-24 18:52:04,904][inference][INFO] - + Warming up the generation pass -[2023-08-24 18:52:05,398][inference][INFO] - + Tracking generation latency and throughput -[2023-08-24 18:52:10,647][inference][INFO] - + Generation pass latency: 5.25e-01 (s) -[2023-08-24 18:52:10,648][inference][INFO] - + Generation pass throughput: 190.00 (tokens/s) -[2023-08-24 18:52:10,648][inference][INFO] - Saving inference results -[2023-08-24 18:52:10,660][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-25_06:19:11_ae320fa53f74cc4dfa0e4fc3c95b6129a86b0512/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-25_06:19:11_ae320fa53f74cc4dfa0e4fc3c95b6129a86b0512/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_06:19:11_ae320fa53f74cc4dfa0e4fc3c95b6129a86b0512/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_06:19:11_ae320fa53f74cc4dfa0e4fc3c95b6129a86b0512/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-25_06:19:11_ae320fa53f74cc4dfa0e4fc3c95b6129a86b0512/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 60e85d3ae4201790e0fc424c5d652c78e615e041..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_06:19:11_ae320fa53f74cc4dfa0e4fc3c95b6129a86b0512/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
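
The sweeper block above is what fans this experiment out into the numbered result folders: under Hydra's multirun mode, `benchmark.input_shapes.batch_size: 1,4` expands into job num=0 (batch 1) and num=1 (batch 4), matching the `.../pytorch_bert_inference/0` and `.../1` paths elsewhere in this diff. Below is a minimal sketch of an equivalent entry point, assuming hydra-core 1.3 and a `configs/bert_cpu_inference.yaml`; the body of `main` is hypothetical and only touches config fields visible in the diff:

```python
# Minimal sketch of the Hydra entry point implied by the hydra.yaml above.
# Assumes hydra-core 1.3; the function body is hypothetical.
import hydra
from omegaconf import DictConfig

@hydra.main(version_base="1.3", config_path="configs", config_name="bert_cpu_inference")
def main(cfg: DictConfig) -> None:
    # Under --multirun, the basic sweeper expands
    # benchmark.input_shapes.batch_size=1,4 into jobs num=0 and num=1,
    # each writing to sweeps/<COMMIT_DATE_GMT>_<COMMIT_SHA>/<experiment>/<num>.
    print(cfg.experiment_name, cfg.benchmark.input_shapes.batch_size)

if __name__ == "__main__":
    main()  # e.g. python main.py --multirun benchmark.input_shapes.batch_size=1,4
```
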
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-25_06:19:11_ae320fa53f74cc4dfa0e4fc3c95b6129a86b0512/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-25_06:19:11_ae320fa53f74cc4dfa0e4fc3c95b6129a86b0512/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-25_06:19:11_ae320fa53f74cc4dfa0e4fc3c95b6129a86b0512/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_06:19:11_ae320fa53f74cc4dfa0e4fc3c95b6129a86b0512/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-25_06:19:11_ae320fa53f74cc4dfa0e4fc3c95b6129a86b0512/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-25_06:19:11_ae320fa53f74cc4dfa0e4fc3c95b6129a86b0512/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_06:19:11_ae320fa53f74cc4dfa0e4fc3c95b6129a86b0512/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_06:19:11_ae320fa53f74cc4dfa0e4fc3c95b6129a86b0512/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-25_06:19:11_ae320fa53f74cc4dfa0e4fc3c95b6129a86b0512/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 6aec5cea13562a39570d7315226904a6d6916cf1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_06:19:11_ae320fa53f74cc4dfa0e4fc3c95b6129a86b0512/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.29215999999997,0.00378,265.0 diff --git a/raw_results/2023-08-25_06:19:11_ae320fa53f74cc4dfa0e4fc3c95b6129a86b0512/pytorch_bert_inference/0/main.log b/raw_results/2023-08-25_06:19:11_ae320fa53f74cc4dfa0e4fc3c95b6129a86b0512/pytorch_bert_inference/0/main.log deleted file mode 100644 index 44cab8140ae092cce5cdfb3d4725f502d9e15f69..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_06:19:11_ae320fa53f74cc4dfa0e4fc3c95b6129a86b0512/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-25 06:50:04,793][benchmark][INFO] - Configuring inference benchmark -[2023-08-25 06:50:04,795][benchmark][INFO] - + Setting seed(42) -[2023-08-25 06:50:06,047][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-25 06:50:06,047][backend][INFO] - Configuring pytorch backend -[2023-08-25 06:50:06,048][backend][INFO] - + Checking initial device isolation -[2023-08-25 06:50:06,048][backend][INFO] - + Checking contineous device isolation -[2023-08-25 06:50:06,048][pytorch][INFO] - + Disabling gradients -[2023-08-25 06:50:06,048][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-25 06:50:06,678][pytorch][INFO] - + Turning on eval mode -[2023-08-25 06:50:06,678][inference][INFO] - Running inference benchmark -[2023-08-25 06:50:06,799][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-25 06:50:06,801][inference][INFO] - + Tracking forward 
pass peak memory -[2023-08-25 06:50:06,859][inference][INFO] - + Forward pass peak memory: 467.29215999999997 (MB) -[2023-08-25 06:50:06,861][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-25 06:50:06,862][inference][INFO] - + Warming up the forward pass -[2023-08-25 06:50:06,895][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-25 06:50:11,940][inference][INFO] - + Forward pass latency: 3.78e-03 (s) -[2023-08-25 06:50:11,941][inference][INFO] - + Forward pass throughput: 265.00 (samples/s) -[2023-08-25 06:50:11,941][inference][INFO] - Saving inference results -[2023-08-25 06:50:11,953][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-25_06:19:11_ae320fa53f74cc4dfa0e4fc3c95b6129a86b0512/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-25_06:19:11_ae320fa53f74cc4dfa0e4fc3c95b6129a86b0512/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_06:19:11_ae320fa53f74cc4dfa0e4fc3c95b6129a86b0512/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_06:19:11_ae320fa53f74cc4dfa0e4fc3c95b6129a86b0512/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-25_06:19:11_ae320fa53f74cc4dfa0e4fc3c95b6129a86b0512/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 26a09057f5d366da822a625fd5d363b3fb5f3470..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_06:19:11_ae320fa53f74cc4dfa0e4fc3c95b6129a86b0512/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} 
- launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-25_06:19:11_ae320fa53f74cc4dfa0e4fc3c95b6129a86b0512/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-25_06:19:11_ae320fa53f74cc4dfa0e4fc3c95b6129a86b0512/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-25_06:19:11_ae320fa53f74cc4dfa0e4fc3c95b6129a86b0512/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_06:19:11_ae320fa53f74cc4dfa0e4fc3c95b6129a86b0512/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-25_06:19:11_ae320fa53f74cc4dfa0e4fc3c95b6129a86b0512/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-25_06:19:11_ae320fa53f74cc4dfa0e4fc3c95b6129a86b0512/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_06:19:11_ae320fa53f74cc4dfa0e4fc3c95b6129a86b0512/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_06:19:11_ae320fa53f74cc4dfa0e4fc3c95b6129a86b0512/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-25_06:19:11_ae320fa53f74cc4dfa0e4fc3c95b6129a86b0512/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 7f29285a30f3a4924a1707ebf4bf4e1be1c27a5e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_06:19:11_ae320fa53f74cc4dfa0e4fc3c95b6129a86b0512/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.340736,0.00432,926.0 diff --git a/raw_results/2023-08-25_06:19:11_ae320fa53f74cc4dfa0e4fc3c95b6129a86b0512/pytorch_bert_inference/1/main.log b/raw_results/2023-08-25_06:19:11_ae320fa53f74cc4dfa0e4fc3c95b6129a86b0512/pytorch_bert_inference/1/main.log deleted file mode 100644 index 
79421bd0d99a70ea0ebaf99d38c35fa6dedb076e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_06:19:11_ae320fa53f74cc4dfa0e4fc3c95b6129a86b0512/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-25 06:50:12,326][benchmark][INFO] - Configuring inference benchmark -[2023-08-25 06:50:12,328][benchmark][INFO] - + Setting seed(42) -[2023-08-25 06:50:12,814][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-25 06:50:12,814][backend][INFO] - Configuring pytorch backend -[2023-08-25 06:50:12,815][backend][INFO] - + Checking initial device isolation -[2023-08-25 06:50:12,815][backend][INFO] - + Checking contineous device isolation -[2023-08-25 06:50:12,815][pytorch][INFO] - + Disabling gradients -[2023-08-25 06:50:12,815][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-25 06:50:12,932][pytorch][INFO] - + Turning on eval mode -[2023-08-25 06:50:12,933][inference][INFO] - Running inference benchmark -[2023-08-25 06:50:13,060][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-25 06:50:13,061][inference][INFO] - + Tracking forward pass peak memory -[2023-08-25 06:50:13,105][inference][INFO] - + Forward pass peak memory: 468.340736 (MB) -[2023-08-25 06:50:13,106][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-25 06:50:13,108][inference][INFO] - + Warming up the forward pass -[2023-08-25 06:50:13,152][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-25 06:50:18,192][inference][INFO] - + Forward pass latency: 4.32e-03 (s) -[2023-08-25 06:50:18,193][inference][INFO] - + Forward pass throughput: 926.00 (samples/s) -[2023-08-25 06:50:18,193][inference][INFO] - Saving inference results -[2023-08-25 06:50:18,201][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-25_06:19:11_ae320fa53f74cc4dfa0e4fc3c95b6129a86b0512/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-25_06:19:11_ae320fa53f74cc4dfa0e4fc3c95b6129a86b0512/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_06:19:11_ae320fa53f74cc4dfa0e4fc3c95b6129a86b0512/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_06:19:11_ae320fa53f74cc4dfa0e4fc3c95b6129a86b0512/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-25_06:19:11_ae320fa53f74cc4dfa0e4fc3c95b6129a86b0512/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 7b027127fb4432c8c0320230f22521a7ca196b8c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_06:19:11_ae320fa53f74cc4dfa0e4fc3c95b6129a86b0512/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
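
Note the `${oc.env:COMMIT_DATE_GMT}` and `${oc.env:COMMIT_SHA}` interpolations in the run/sweep directories above: OmegaConf resolves them from the environment at launch time, which is how every artifact in this diff ends up under a `<date>_<sha>` folder. A small sketch of the same resolution, assuming the CI harness exports both variables before launching the benchmark:

```python
# Sketch of how the sweep output directory in the config above is resolved.
# Assumes COMMIT_DATE_GMT and COMMIT_SHA are exported by the harness; the
# raw_results/ folder names in this diff follow the same <date>_<sha> pattern.
import os

def sweep_dir(experiment_name: str, job_num: int) -> str:
    stamp = f"{os.environ['COMMIT_DATE_GMT']}_{os.environ['COMMIT_SHA']}"
    return os.path.join("sweeps", stamp, experiment_name, str(job_num))

# e.g. sweep_dir("pytorch_gpt2_inference", 0) ->
# "sweeps/2023-08-25_06:19:11_ae320fa53f74cc4dfa0e4fc3c95b6129a86b0512/pytorch_gpt2_inference/0"
# which matches the output_dir recorded in the hydra.yaml below.
```
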
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-25_06:19:11_ae320fa53f74cc4dfa0e4fc3c95b6129a86b0512/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-25_06:19:11_ae320fa53f74cc4dfa0e4fc3c95b6129a86b0512/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-25_06:19:11_ae320fa53f74cc4dfa0e4fc3c95b6129a86b0512/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_06:19:11_ae320fa53f74cc4dfa0e4fc3c95b6129a86b0512/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-25_06:19:11_ae320fa53f74cc4dfa0e4fc3c95b6129a86b0512/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-25_06:19:11_ae320fa53f74cc4dfa0e4fc3c95b6129a86b0512/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_06:19:11_ae320fa53f74cc4dfa0e4fc3c95b6129a86b0512/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_06:19:11_ae320fa53f74cc4dfa0e4fc3c95b6129a86b0512/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-25_06:19:11_ae320fa53f74cc4dfa0e4fc3c95b6129a86b0512/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index a1bb40b45ba39b2ca2518290fdd5ba727da00ff7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_06:19:11_ae320fa53f74cc4dfa0e4fc3c95b6129a86b0512/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.1968,0.00451,222.0,0.528,189.0 diff --git a/raw_results/2023-08-25_06:19:11_ae320fa53f74cc4dfa0e4fc3c95b6129a86b0512/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-25_06:19:11_ae320fa53f74cc4dfa0e4fc3c95b6129a86b0512/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index e2be3835ba13d8546d0bce3b5db517ce67b5400f..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_06:19:11_ae320fa53f74cc4dfa0e4fc3c95b6129a86b0512/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-25 06:50:22,989][benchmark][INFO] - Configuring inference benchmark -[2023-08-25 06:50:22,990][benchmark][INFO] - + Setting seed(42) -[2023-08-25 06:50:24,425][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-25 06:50:24,425][backend][INFO] - Configuring pytorch backend -[2023-08-25 06:50:24,426][backend][INFO] - + Checking initial device isolation -[2023-08-25 06:50:24,426][backend][INFO] - + Checking contineous device isolation -[2023-08-25 06:50:24,426][pytorch][INFO] - + Disabling gradients -[2023-08-25 06:50:24,426][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-25 06:50:25,096][pytorch][INFO] - + Turning on eval mode -[2023-08-25 06:50:25,096][inference][INFO] - Running inference benchmark -[2023-08-25 06:50:25,300][inference][INFO] - + Tracking forward pass peak memory -[2023-08-25 06:50:25,349][inference][INFO] - + Forward pass peak memory: 469.1968 (MB) -[2023-08-25 06:50:25,351][inference][INFO] - + Warming up the forward pass 
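
The log lines that follow trace the benchmark loop configured above (`warmup_runs: 10`, `benchmark_duration: 5`): warm the forward pass up, then time it repeatedly for roughly five seconds and average. The reported numbers are consistent with `throughput = batch_size / latency` (1 / 4.51e-03 ≈ 222 samples/s) and, for generation, `new_tokens / latency` (100 / 0.528 ≈ 189 tokens/s). A simplified sketch of that timing scheme — not optimum-benchmark's actual implementation; `model_forward` is a hypothetical callable wrapping `model(**dummy_inputs)`:

```python
# Simplified sketch of the warmup-then-measure loop the log describes.
# Not optimum_benchmark's real code; model_forward is hypothetical.
import time
from statistics import mean

def benchmark_forward(model_forward, batch_size: int,
                      warmup_runs: int = 10, benchmark_duration: float = 5.0):
    for _ in range(warmup_runs):            # "Warming up the forward pass"
        model_forward()
    latencies = []
    start = time.perf_counter()
    while time.perf_counter() - start < benchmark_duration:
        t0 = time.perf_counter()
        model_forward()
        latencies.append(time.perf_counter() - t0)
    latency = mean(latencies)                # e.g. 4.51e-03 s in the gpt2 run
    throughput = batch_size / latency        # 1 / 0.00451 ~= 222 samples/s
    return latency, throughput
```
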
-[2023-08-25 06:50:25,385][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-25 06:50:30,427][inference][INFO] - + Forward pass latency: 4.51e-03 (s) -[2023-08-25 06:50:30,428][inference][INFO] - + Forward pass throughput: 222.00 (samples/s) -[2023-08-25 06:50:30,429][inference][INFO] - + Warming up the generation pass -[2023-08-25 06:50:31,027][inference][INFO] - + Tracking generation latency and throughput -[2023-08-25 06:50:36,312][inference][INFO] - + Generation pass latency: 5.28e-01 (s) -[2023-08-25 06:50:36,313][inference][INFO] - + Generation pass throughput: 189.00 (tokens/s) -[2023-08-25 06:50:36,314][inference][INFO] - Saving inference results -[2023-08-25 06:50:36,326][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-25_07:20:37_8968fface4e804f380391d880f569578b84b4121/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-25_07:20:37_8968fface4e804f380391d880f569578b84b4121/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_07:20:37_8968fface4e804f380391d880f569578b84b4121/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_07:20:37_8968fface4e804f380391d880f569578b84b4121/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-25_07:20:37_8968fface4e804f380391d880f569578b84b4121/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 6c65298c2348ece8d6188cf5443da557ffa58533..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_07:20:37_8968fface4e804f380391d880f569578b84b4121/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-25_07:20:37_8968fface4e804f380391d880f569578b84b4121/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-25_07:20:37_8968fface4e804f380391d880f569578b84b4121/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-25_07:20:37_8968fface4e804f380391d880f569578b84b4121/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_07:20:37_8968fface4e804f380391d880f569578b84b4121/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-25_07:20:37_8968fface4e804f380391d880f569578b84b4121/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-25_07:20:37_8968fface4e804f380391d880f569578b84b4121/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_07:20:37_8968fface4e804f380391d880f569578b84b4121/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_07:20:37_8968fface4e804f380391d880f569578b84b4121/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-25_07:20:37_8968fface4e804f380391d880f569578b84b4121/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 1cfe8b875a498dc47b508f106678e65ff48436fa..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_07:20:37_8968fface4e804f380391d880f569578b84b4121/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.804736,0.00366,273.0 diff --git a/raw_results/2023-08-25_07:20:37_8968fface4e804f380391d880f569578b84b4121/pytorch_bert_inference/0/main.log b/raw_results/2023-08-25_07:20:37_8968fface4e804f380391d880f569578b84b4121/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
10c34f742db336af7de7daf2e5978786b09cfd0f..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_07:20:37_8968fface4e804f380391d880f569578b84b4121/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-25 08:49:56,509][benchmark][INFO] - Configuring inference benchmark -[2023-08-25 08:49:56,510][benchmark][INFO] - + Setting seed(42) -[2023-08-25 08:49:57,811][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-25 08:49:57,811][backend][INFO] - Configuring pytorch backend -[2023-08-25 08:49:57,811][backend][INFO] - + Checking initial device isolation -[2023-08-25 08:49:57,811][backend][INFO] - + Checking contineous device isolation -[2023-08-25 08:49:57,812][pytorch][INFO] - + Disabling gradients -[2023-08-25 08:49:57,812][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-25 08:49:58,501][pytorch][INFO] - + Turning on eval mode -[2023-08-25 08:49:58,502][inference][INFO] - Running inference benchmark -[2023-08-25 08:49:58,624][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-25 08:49:58,625][inference][INFO] - + Tracking forward pass peak memory -[2023-08-25 08:49:58,682][inference][INFO] - + Forward pass peak memory: 466.804736 (MB) -[2023-08-25 08:49:58,683][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-25 08:49:58,684][inference][INFO] - + Warming up the forward pass -[2023-08-25 08:49:58,722][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-25 08:50:03,769][inference][INFO] - + Forward pass latency: 3.66e-03 (s) -[2023-08-25 08:50:03,770][inference][INFO] - + Forward pass throughput: 273.00 (samples/s) -[2023-08-25 08:50:03,770][inference][INFO] - Saving inference results -[2023-08-25 08:50:03,781][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-25_07:20:37_8968fface4e804f380391d880f569578b84b4121/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-25_07:20:37_8968fface4e804f380391d880f569578b84b4121/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_07:20:37_8968fface4e804f380391d880f569578b84b4121/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_07:20:37_8968fface4e804f380391d880f569578b84b4121/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-25_07:20:37_8968fface4e804f380391d880f569578b84b4121/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 4d0bf4a90fe4d1e05829a72ee281563ccbb992bd..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_07:20:37_8968fface4e804f380391d880f569578b84b4121/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
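
Since each sweep job writes one `inference_results.csv`, comparing batch sizes for a given commit is a matter of collating the per-job files. A sketch assuming pandas and the `raw_results/` layout shown in this diff; the cited values come from the 2023-08-25_06:19:11 bert sweep above (batch 1: 0.00378 s / 265 samples/s; batch 4: 0.00432 s / 926 samples/s, i.e. ≈ 4 / 0.00432):

```python
# Sketch for collating the per-job inference_results.csv files in this diff.
# Assumes pandas is available and the raw_results/<date>_<sha>/<experiment>/<job>
# layout seen above.
from pathlib import Path
import pandas as pd

def load_sweep(sweep_dir: str) -> pd.DataFrame:
    frames = []
    for csv in sorted(Path(sweep_dir).glob("*/inference_results.csv")):
        df = pd.read_csv(csv, index_col=0)   # first CSV column is an unnamed index
        df["job"] = csv.parent.name          # "0" = batch 1, "1" = batch 4
        frames.append(df)
    return pd.concat(frames, ignore_index=True)

df = load_sweep("raw_results/2023-08-25_06:19:11_ae320fa53f74cc4dfa0e4fc3c95b6129a86b0512/pytorch_bert_inference")
print(df[["job", "forward.latency(s)", "forward.throughput(samples/s)"]])
```
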
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-25_07:20:37_8968fface4e804f380391d880f569578b84b4121/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-25_07:20:37_8968fface4e804f380391d880f569578b84b4121/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-25_07:20:37_8968fface4e804f380391d880f569578b84b4121/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_07:20:37_8968fface4e804f380391d880f569578b84b4121/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-25_07:20:37_8968fface4e804f380391d880f569578b84b4121/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-25_07:20:37_8968fface4e804f380391d880f569578b84b4121/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_07:20:37_8968fface4e804f380391d880f569578b84b4121/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_07:20:37_8968fface4e804f380391d880f569578b84b4121/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-25_07:20:37_8968fface4e804f380391d880f569578b84b4121/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 13cb379663a94c2ba37d5b429329e0d23315439a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_07:20:37_8968fface4e804f380391d880f569578b84b4121/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.80415999999997,0.00346,1160.0 diff --git a/raw_results/2023-08-25_07:20:37_8968fface4e804f380391d880f569578b84b4121/pytorch_bert_inference/1/main.log b/raw_results/2023-08-25_07:20:37_8968fface4e804f380391d880f569578b84b4121/pytorch_bert_inference/1/main.log deleted file mode 100644 index c5085999858e8a52dae460013a795da39daa3e82..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_07:20:37_8968fface4e804f380391d880f569578b84b4121/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-25 08:50:04,145][benchmark][INFO] - Configuring inference benchmark -[2023-08-25 08:50:04,146][benchmark][INFO] - + Setting seed(42) -[2023-08-25 08:50:04,598][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-25 08:50:04,599][backend][INFO] - Configuring pytorch backend -[2023-08-25 08:50:04,599][backend][INFO] - + Checking initial device isolation -[2023-08-25 08:50:04,599][backend][INFO] - + Checking contineous device isolation -[2023-08-25 08:50:04,599][pytorch][INFO] - + Disabling gradients -[2023-08-25 08:50:04,599][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-25 08:50:04,725][pytorch][INFO] - + Turning on eval mode -[2023-08-25 08:50:04,726][inference][INFO] - Running inference benchmark -[2023-08-25 08:50:04,872][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-25 08:50:04,873][inference][INFO] - + Tracking forward 
pass peak memory -[2023-08-25 08:50:04,915][inference][INFO] - + Forward pass peak memory: 467.80415999999997 (MB) -[2023-08-25 08:50:04,916][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-25 08:50:04,917][inference][INFO] - + Warming up the forward pass -[2023-08-25 08:50:04,953][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-25 08:50:09,998][inference][INFO] - + Forward pass latency: 3.46e-03 (s) -[2023-08-25 08:50:10,000][inference][INFO] - + Forward pass throughput: 1160.00 (samples/s) -[2023-08-25 08:50:10,000][inference][INFO] - Saving inference results -[2023-08-25 08:50:10,008][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-25_07:20:37_8968fface4e804f380391d880f569578b84b4121/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-25_07:20:37_8968fface4e804f380391d880f569578b84b4121/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_07:20:37_8968fface4e804f380391d880f569578b84b4121/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_07:20:37_8968fface4e804f380391d880f569578b84b4121/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-25_07:20:37_8968fface4e804f380391d880f569578b84b4121/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 2ba79f1c0c93d64cc28181c64e635af6bfe25d08..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_07:20:37_8968fface4e804f380391d880f569578b84b4121/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - 
launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-25_07:20:37_8968fface4e804f380391d880f569578b84b4121/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-25_07:20:37_8968fface4e804f380391d880f569578b84b4121/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-25_07:20:37_8968fface4e804f380391d880f569578b84b4121/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_07:20:37_8968fface4e804f380391d880f569578b84b4121/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-25_07:20:37_8968fface4e804f380391d880f569578b84b4121/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-25_07:20:37_8968fface4e804f380391d880f569578b84b4121/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_07:20:37_8968fface4e804f380391d880f569578b84b4121/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_07:20:37_8968fface4e804f380391d880f569578b84b4121/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-25_07:20:37_8968fface4e804f380391d880f569578b84b4121/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index c62cd6f11fc73c1c0e5c05ffe1460483722be13f..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_07:20:37_8968fface4e804f380391d880f569578b84b4121/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.21318399999996,0.00394,254.0,0.492,203.0 diff --git a/raw_results/2023-08-25_07:20:37_8968fface4e804f380391d880f569578b84b4121/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-25_07:20:37_8968fface4e804f380391d880f569578b84b4121/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 439e3305fb34cdc1ebcb065cbc010c3b3a5298b2..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-25_07:20:37_8968fface4e804f380391d880f569578b84b4121/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-25 08:50:14,912][benchmark][INFO] - Configuring inference benchmark -[2023-08-25 08:50:14,912][benchmark][INFO] - + Setting seed(42) -[2023-08-25 08:50:16,427][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-25 08:50:16,427][backend][INFO] - Configuring pytorch backend -[2023-08-25 08:50:16,428][backend][INFO] - + Checking initial device isolation -[2023-08-25 08:50:16,428][backend][INFO] - + Checking contineous device isolation -[2023-08-25 08:50:16,428][pytorch][INFO] - + Disabling gradients -[2023-08-25 08:50:16,428][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-25 08:50:17,191][pytorch][INFO] - + Turning on eval mode -[2023-08-25 08:50:17,192][inference][INFO] - Running inference benchmark -[2023-08-25 08:50:17,542][inference][INFO] - + Tracking forward pass peak memory -[2023-08-25 08:50:17,588][inference][INFO] - + Forward pass peak memory: 469.21318399999996 (MB) -[2023-08-25 08:50:17,590][inference][INFO] - + Warming up the forward pass -[2023-08-25 08:50:17,627][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-25 08:50:22,670][inference][INFO] - + Forward pass latency: 3.94e-03 (s) -[2023-08-25 08:50:22,672][inference][INFO] - + Forward pass throughput: 254.00 (samples/s) -[2023-08-25 08:50:22,673][inference][INFO] - + Warming up the generation pass -[2023-08-25 08:50:23,176][inference][INFO] - + Tracking generation latency and throughput -[2023-08-25 08:50:28,589][inference][INFO] - + Generation pass latency: 4.92e-01 (s) -[2023-08-25 08:50:28,590][inference][INFO] - + Generation pass throughput: 203.00 (tokens/s) -[2023-08-25 08:50:28,590][inference][INFO] - Saving inference results -[2023-08-25 08:50:28,608][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-25_08:58:14_cb8e3ee25fc2349e9262faa1e0c35d80978349fe/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-25_08:58:14_cb8e3ee25fc2349e9262faa1e0c35d80978349fe/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_08:58:14_cb8e3ee25fc2349e9262faa1e0c35d80978349fe/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_08:58:14_cb8e3ee25fc2349e9262faa1e0c35d80978349fe/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-25_08:58:14_cb8e3ee25fc2349e9262faa1e0c35d80978349fe/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 14e64f7ee3d9290a31bb63919cdee7e8435e5266..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_08:58:14_cb8e3ee25fc2349e9262faa1e0c35d80978349fe/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-25_08:58:14_cb8e3ee25fc2349e9262faa1e0c35d80978349fe/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-25_08:58:14_cb8e3ee25fc2349e9262faa1e0c35d80978349fe/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-25_08:58:14_cb8e3ee25fc2349e9262faa1e0c35d80978349fe/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_08:58:14_cb8e3ee25fc2349e9262faa1e0c35d80978349fe/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-25_08:58:14_cb8e3ee25fc2349e9262faa1e0c35d80978349fe/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-25_08:58:14_cb8e3ee25fc2349e9262faa1e0c35d80978349fe/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_08:58:14_cb8e3ee25fc2349e9262faa1e0c35d80978349fe/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_08:58:14_cb8e3ee25fc2349e9262faa1e0c35d80978349fe/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-25_08:58:14_cb8e3ee25fc2349e9262faa1e0c35d80978349fe/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 07ba86adc476610dce905449e8f07accdb050de0..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_08:58:14_cb8e3ee25fc2349e9262faa1e0c35d80978349fe/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.75091199999997,0.00369,271.0 diff --git a/raw_results/2023-08-25_08:58:14_cb8e3ee25fc2349e9262faa1e0c35d80978349fe/pytorch_bert_inference/0/main.log b/raw_results/2023-08-25_08:58:14_cb8e3ee25fc2349e9262faa1e0c35d80978349fe/pytorch_bert_inference/0/main.log deleted file mode 100644 index fe87cca07c4b9d1becc82eb632a7fe74d9be7daf..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_08:58:14_cb8e3ee25fc2349e9262faa1e0c35d80978349fe/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-25 10:50:23,013][benchmark][INFO] - Configuring inference benchmark -[2023-08-25 10:50:23,014][benchmark][INFO] - + Setting seed(42) -[2023-08-25 10:50:24,391][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-25 10:50:24,391][backend][INFO] - Configuring pytorch backend -[2023-08-25 10:50:24,392][backend][INFO] - + Checking initial device isolation -[2023-08-25 10:50:24,392][backend][INFO] - + Checking contineous device isolation -[2023-08-25 10:50:24,392][pytorch][INFO] - + Disabling gradients -[2023-08-25 10:50:24,392][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-25 10:50:25,350][pytorch][INFO] - + Turning on eval mode -[2023-08-25 10:50:25,351][inference][INFO] - Running inference benchmark -[2023-08-25 10:50:25,474][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-25 10:50:25,475][inference][INFO] - + Tracking forward 
pass peak memory -[2023-08-25 10:50:25,539][inference][INFO] - + Forward pass peak memory: 467.75091199999997 (MB) -[2023-08-25 10:50:25,541][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-25 10:50:25,542][inference][INFO] - + Warming up the forward pass -[2023-08-25 10:50:25,575][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-25 10:50:30,622][inference][INFO] - + Forward pass latency: 3.69e-03 (s) -[2023-08-25 10:50:30,624][inference][INFO] - + Forward pass throughput: 271.00 (samples/s) -[2023-08-25 10:50:30,624][inference][INFO] - Saving inference results -[2023-08-25 10:50:30,637][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-25_08:58:14_cb8e3ee25fc2349e9262faa1e0c35d80978349fe/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-25_08:58:14_cb8e3ee25fc2349e9262faa1e0c35d80978349fe/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_08:58:14_cb8e3ee25fc2349e9262faa1e0c35d80978349fe/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_08:58:14_cb8e3ee25fc2349e9262faa1e0c35d80978349fe/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-25_08:58:14_cb8e3ee25fc2349e9262faa1e0c35d80978349fe/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index f3a40e2c2904a445531dc4b605c1236821e85e98..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_08:58:14_cb8e3ee25fc2349e9262faa1e0c35d80978349fe/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} 
- launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-25_08:58:14_cb8e3ee25fc2349e9262faa1e0c35d80978349fe/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-25_08:58:14_cb8e3ee25fc2349e9262faa1e0c35d80978349fe/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-25_08:58:14_cb8e3ee25fc2349e9262faa1e0c35d80978349fe/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_08:58:14_cb8e3ee25fc2349e9262faa1e0c35d80978349fe/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-25_08:58:14_cb8e3ee25fc2349e9262faa1e0c35d80978349fe/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-25_08:58:14_cb8e3ee25fc2349e9262faa1e0c35d80978349fe/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_08:58:14_cb8e3ee25fc2349e9262faa1e0c35d80978349fe/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_08:58:14_cb8e3ee25fc2349e9262faa1e0c35d80978349fe/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-25_08:58:14_cb8e3ee25fc2349e9262faa1e0c35d80978349fe/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index e732eeb1bfbc4e7ff2416854982dc34b2987ae59..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_08:58:14_cb8e3ee25fc2349e9262faa1e0c35d80978349fe/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.774912,0.00347,1150.0 diff --git a/raw_results/2023-08-25_08:58:14_cb8e3ee25fc2349e9262faa1e0c35d80978349fe/pytorch_bert_inference/1/main.log b/raw_results/2023-08-25_08:58:14_cb8e3ee25fc2349e9262faa1e0c35d80978349fe/pytorch_bert_inference/1/main.log deleted file mode 100644 index 
4d9b69268fedf74002873cb97b3e0d807f83293a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_08:58:14_cb8e3ee25fc2349e9262faa1e0c35d80978349fe/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-25 10:50:31,021][benchmark][INFO] - Configuring inference benchmark -[2023-08-25 10:50:31,022][benchmark][INFO] - + Setting seed(42) -[2023-08-25 10:50:31,476][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-25 10:50:31,476][backend][INFO] - Configuring pytorch backend -[2023-08-25 10:50:31,476][backend][INFO] - + Checking initial device isolation -[2023-08-25 10:50:31,477][backend][INFO] - + Checking contineous device isolation -[2023-08-25 10:50:31,477][pytorch][INFO] - + Disabling gradients -[2023-08-25 10:50:31,477][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-25 10:50:31,751][pytorch][INFO] - + Turning on eval mode -[2023-08-25 10:50:31,752][inference][INFO] - Running inference benchmark -[2023-08-25 10:50:31,872][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-25 10:50:31,873][inference][INFO] - + Tracking forward pass peak memory -[2023-08-25 10:50:31,917][inference][INFO] - + Forward pass peak memory: 468.774912 (MB) -[2023-08-25 10:50:31,918][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-25 10:50:31,920][inference][INFO] - + Warming up the forward pass -[2023-08-25 10:50:31,956][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-25 10:50:37,001][inference][INFO] - + Forward pass latency: 3.47e-03 (s) -[2023-08-25 10:50:37,002][inference][INFO] - + Forward pass throughput: 1150.00 (samples/s) -[2023-08-25 10:50:37,002][inference][INFO] - Saving inference results -[2023-08-25 10:50:37,011][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-25_08:58:14_cb8e3ee25fc2349e9262faa1e0c35d80978349fe/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-25_08:58:14_cb8e3ee25fc2349e9262faa1e0c35d80978349fe/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_08:58:14_cb8e3ee25fc2349e9262faa1e0c35d80978349fe/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_08:58:14_cb8e3ee25fc2349e9262faa1e0c35d80978349fe/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-25_08:58:14_cb8e3ee25fc2349e9262faa1e0c35d80978349fe/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 4b30fcab985eab82d426dd18b6da79ee9a4d6f0f..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_08:58:14_cb8e3ee25fc2349e9262faa1e0c35d80978349fe/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-25_08:58:14_cb8e3ee25fc2349e9262faa1e0c35d80978349fe/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-25_08:58:14_cb8e3ee25fc2349e9262faa1e0c35d80978349fe/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-25_08:58:14_cb8e3ee25fc2349e9262faa1e0c35d80978349fe/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_08:58:14_cb8e3ee25fc2349e9262faa1e0c35d80978349fe/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-25_08:58:14_cb8e3ee25fc2349e9262faa1e0c35d80978349fe/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-25_08:58:14_cb8e3ee25fc2349e9262faa1e0c35d80978349fe/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_08:58:14_cb8e3ee25fc2349e9262faa1e0c35d80978349fe/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_08:58:14_cb8e3ee25fc2349e9262faa1e0c35d80978349fe/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-25_08:58:14_cb8e3ee25fc2349e9262faa1e0c35d80978349fe/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 8505fea72a1be78c7343dfef6663569d3bbfbbc0..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_08:58:14_cb8e3ee25fc2349e9262faa1e0c35d80978349fe/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.372928,0.00382,262.0,0.507,197.0 diff --git a/raw_results/2023-08-25_08:58:14_cb8e3ee25fc2349e9262faa1e0c35d80978349fe/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-25_08:58:14_cb8e3ee25fc2349e9262faa1e0c35d80978349fe/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 717333a6a333569e9ac345bad15bc99e37fbad27..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_08:58:14_cb8e3ee25fc2349e9262faa1e0c35d80978349fe/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-25 10:50:41,845][benchmark][INFO] - Configuring inference benchmark -[2023-08-25 10:50:41,846][benchmark][INFO] - + Setting seed(42) -[2023-08-25 10:50:43,519][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-25 10:50:43,520][backend][INFO] - Configuring pytorch backend -[2023-08-25 10:50:43,520][backend][INFO] - + Checking initial device isolation -[2023-08-25 10:50:43,520][backend][INFO] - + Checking contineous device isolation -[2023-08-25 10:50:43,520][pytorch][INFO] - + Disabling gradients -[2023-08-25 10:50:43,520][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-25 10:50:44,173][pytorch][INFO] - + Turning on eval mode -[2023-08-25 10:50:44,174][inference][INFO] - Running inference benchmark -[2023-08-25 10:50:44,393][inference][INFO] - + Tracking forward pass peak memory -[2023-08-25 10:50:44,437][inference][INFO] - + Forward pass peak memory: 469.372928 (MB) -[2023-08-25 10:50:44,439][inference][INFO] - + Warming up the forward pass 
-[2023-08-25 10:50:44,470][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-25 10:50:49,516][inference][INFO] - + Forward pass latency: 3.82e-03 (s) -[2023-08-25 10:50:49,518][inference][INFO] - + Forward pass throughput: 262.00 (samples/s) -[2023-08-25 10:50:49,519][inference][INFO] - + Warming up the generation pass -[2023-08-25 10:50:50,026][inference][INFO] - + Tracking generation latency and throughput -[2023-08-25 10:50:55,097][inference][INFO] - + Generation pass latency: 5.07e-01 (s) -[2023-08-25 10:50:55,098][inference][INFO] - + Generation pass throughput: 197.00 (tokens/s) -[2023-08-25 10:50:55,098][inference][INFO] - Saving inference results -[2023-08-25 10:50:55,111][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-25_10:56:17_85cf90a1c92f574ce2eb3fafe0681a3af0a9d41b/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-25_10:56:17_85cf90a1c92f574ce2eb3fafe0681a3af0a9d41b/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_10:56:17_85cf90a1c92f574ce2eb3fafe0681a3af0a9d41b/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_10:56:17_85cf90a1c92f574ce2eb3fafe0681a3af0a9d41b/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-25_10:56:17_85cf90a1c92f574ce2eb3fafe0681a3af0a9d41b/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index ff4911c5b07878d1b87933f5c1dca74a5f8a309b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_10:56:17_85cf90a1c92f574ce2eb3fafe0681a3af0a9d41b/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-25_10:56:17_85cf90a1c92f574ce2eb3fafe0681a3af0a9d41b/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-25_10:56:17_85cf90a1c92f574ce2eb3fafe0681a3af0a9d41b/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-25_10:56:17_85cf90a1c92f574ce2eb3fafe0681a3af0a9d41b/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_10:56:17_85cf90a1c92f574ce2eb3fafe0681a3af0a9d41b/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-25_10:56:17_85cf90a1c92f574ce2eb3fafe0681a3af0a9d41b/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-25_10:56:17_85cf90a1c92f574ce2eb3fafe0681a3af0a9d41b/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_10:56:17_85cf90a1c92f574ce2eb3fafe0681a3af0a9d41b/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_10:56:17_85cf90a1c92f574ce2eb3fafe0681a3af0a9d41b/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-25_10:56:17_85cf90a1c92f574ce2eb3fafe0681a3af0a9d41b/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 36ab183323afe79b414eaae790df25ed314a0085..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_10:56:17_85cf90a1c92f574ce2eb3fafe0681a3af0a9d41b/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.86208,0.00365,274.0 diff --git a/raw_results/2023-08-25_10:56:17_85cf90a1c92f574ce2eb3fafe0681a3af0a9d41b/pytorch_bert_inference/0/main.log b/raw_results/2023-08-25_10:56:17_85cf90a1c92f574ce2eb3fafe0681a3af0a9d41b/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
e1186cec9f5394964e958aca6c39a90842092175..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_10:56:17_85cf90a1c92f574ce2eb3fafe0681a3af0a9d41b/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-25 12:58:04,025][benchmark][INFO] - Configuring inference benchmark -[2023-08-25 12:58:04,026][benchmark][INFO] - + Setting seed(42) -[2023-08-25 12:58:05,266][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-25 12:58:05,266][backend][INFO] - Configuring pytorch backend -[2023-08-25 12:58:05,266][backend][INFO] - + Checking initial device isolation -[2023-08-25 12:58:05,266][backend][INFO] - + Checking contineous device isolation -[2023-08-25 12:58:05,267][pytorch][INFO] - + Disabling gradients -[2023-08-25 12:58:05,267][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-25 12:58:05,931][pytorch][INFO] - + Turning on eval mode -[2023-08-25 12:58:05,931][inference][INFO] - Running inference benchmark -[2023-08-25 12:58:06,061][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-25 12:58:06,062][inference][INFO] - + Tracking forward pass peak memory -[2023-08-25 12:58:06,127][inference][INFO] - + Forward pass peak memory: 466.86208 (MB) -[2023-08-25 12:58:06,129][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-25 12:58:06,130][inference][INFO] - + Warming up the forward pass -[2023-08-25 12:58:06,167][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-25 12:58:11,222][inference][INFO] - + Forward pass latency: 3.65e-03 (s) -[2023-08-25 12:58:11,224][inference][INFO] - + Forward pass throughput: 274.00 (samples/s) -[2023-08-25 12:58:11,224][inference][INFO] - Saving inference results -[2023-08-25 12:58:11,236][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-25_10:56:17_85cf90a1c92f574ce2eb3fafe0681a3af0a9d41b/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-25_10:56:17_85cf90a1c92f574ce2eb3fafe0681a3af0a9d41b/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_10:56:17_85cf90a1c92f574ce2eb3fafe0681a3af0a9d41b/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_10:56:17_85cf90a1c92f574ce2eb3fafe0681a3af0a9d41b/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-25_10:56:17_85cf90a1c92f574ce2eb3fafe0681a3af0a9d41b/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 6a6b692f5377a22707bc32371e47aeea940af1da..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_10:56:17_85cf90a1c92f574ce2eb3fafe0681a3af0a9d41b/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-25_10:56:17_85cf90a1c92f574ce2eb3fafe0681a3af0a9d41b/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-25_10:56:17_85cf90a1c92f574ce2eb3fafe0681a3af0a9d41b/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-25_10:56:17_85cf90a1c92f574ce2eb3fafe0681a3af0a9d41b/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_10:56:17_85cf90a1c92f574ce2eb3fafe0681a3af0a9d41b/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-25_10:56:17_85cf90a1c92f574ce2eb3fafe0681a3af0a9d41b/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-25_10:56:17_85cf90a1c92f574ce2eb3fafe0681a3af0a9d41b/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_10:56:17_85cf90a1c92f574ce2eb3fafe0681a3af0a9d41b/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_10:56:17_85cf90a1c92f574ce2eb3fafe0681a3af0a9d41b/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-25_10:56:17_85cf90a1c92f574ce2eb3fafe0681a3af0a9d41b/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 848f3630a791cc9045d6c7ca8cb40f2014f8d1eb..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_10:56:17_85cf90a1c92f574ce2eb3fafe0681a3af0a9d41b/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.890176,0.0036,1110.0 diff --git a/raw_results/2023-08-25_10:56:17_85cf90a1c92f574ce2eb3fafe0681a3af0a9d41b/pytorch_bert_inference/1/main.log b/raw_results/2023-08-25_10:56:17_85cf90a1c92f574ce2eb3fafe0681a3af0a9d41b/pytorch_bert_inference/1/main.log deleted file mode 100644 index c72f32d4cc76df360f9664ebc54543daff7563e3..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_10:56:17_85cf90a1c92f574ce2eb3fafe0681a3af0a9d41b/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-25 12:58:11,607][benchmark][INFO] - Configuring inference benchmark -[2023-08-25 12:58:11,608][benchmark][INFO] - + Setting seed(42) -[2023-08-25 12:58:12,047][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-25 12:58:12,047][backend][INFO] - Configuring pytorch backend -[2023-08-25 12:58:12,047][backend][INFO] - + Checking initial device isolation -[2023-08-25 12:58:12,047][backend][INFO] - + Checking contineous device isolation -[2023-08-25 12:58:12,047][pytorch][INFO] - + Disabling gradients -[2023-08-25 12:58:12,048][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-25 12:58:12,165][pytorch][INFO] - + Turning on eval mode -[2023-08-25 12:58:12,165][inference][INFO] - Running inference benchmark -[2023-08-25 12:58:12,300][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-25 12:58:12,301][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-25 12:58:12,343][inference][INFO] - + Forward pass peak memory: 467.890176 (MB) -[2023-08-25 12:58:12,344][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-25 12:58:12,345][inference][INFO] - + Warming up the forward pass -[2023-08-25 12:58:12,396][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-25 12:58:17,443][inference][INFO] - + Forward pass latency: 3.60e-03 (s) -[2023-08-25 12:58:17,444][inference][INFO] - + Forward pass throughput: 1110.00 (samples/s) -[2023-08-25 12:58:17,445][inference][INFO] - Saving inference results -[2023-08-25 12:58:17,453][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-25_10:56:17_85cf90a1c92f574ce2eb3fafe0681a3af0a9d41b/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-25_10:56:17_85cf90a1c92f574ce2eb3fafe0681a3af0a9d41b/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_10:56:17_85cf90a1c92f574ce2eb3fafe0681a3af0a9d41b/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_10:56:17_85cf90a1c92f574ce2eb3fafe0681a3af0a9d41b/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-25_10:56:17_85cf90a1c92f574ce2eb3fafe0681a3af0a9d41b/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index c33e9fbb02447d63a01c8aa451f8c377f3325612..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_10:56:17_85cf90a1c92f574ce2eb3fafe0681a3af0a9d41b/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-25_10:56:17_85cf90a1c92f574ce2eb3fafe0681a3af0a9d41b/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-25_10:56:17_85cf90a1c92f574ce2eb3fafe0681a3af0a9d41b/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-25_10:56:17_85cf90a1c92f574ce2eb3fafe0681a3af0a9d41b/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_10:56:17_85cf90a1c92f574ce2eb3fafe0681a3af0a9d41b/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-25_10:56:17_85cf90a1c92f574ce2eb3fafe0681a3af0a9d41b/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-25_10:56:17_85cf90a1c92f574ce2eb3fafe0681a3af0a9d41b/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_10:56:17_85cf90a1c92f574ce2eb3fafe0681a3af0a9d41b/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_10:56:17_85cf90a1c92f574ce2eb3fafe0681a3af0a9d41b/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-25_10:56:17_85cf90a1c92f574ce2eb3fafe0681a3af0a9d41b/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index d75198593d134bc2b9f57180f21f76ec75c3440d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_10:56:17_85cf90a1c92f574ce2eb3fafe0681a3af0a9d41b/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.143552,0.00348,287.0,0.514,195.0 diff --git a/raw_results/2023-08-25_10:56:17_85cf90a1c92f574ce2eb3fafe0681a3af0a9d41b/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-25_10:56:17_85cf90a1c92f574ce2eb3fafe0681a3af0a9d41b/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 1a133feda3820d0b8f5dc39d0c472a6b2be71632..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-25_10:56:17_85cf90a1c92f574ce2eb3fafe0681a3af0a9d41b/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-25 12:58:22,647][benchmark][INFO] - Configuring inference benchmark -[2023-08-25 12:58:22,648][benchmark][INFO] - + Setting seed(42) -[2023-08-25 12:58:24,232][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-25 12:58:24,233][backend][INFO] - Configuring pytorch backend -[2023-08-25 12:58:24,233][backend][INFO] - + Checking initial device isolation -[2023-08-25 12:58:24,233][backend][INFO] - + Checking contineous device isolation -[2023-08-25 12:58:24,233][pytorch][INFO] - + Disabling gradients -[2023-08-25 12:58:24,234][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-25 12:58:24,858][pytorch][INFO] - + Turning on eval mode -[2023-08-25 12:58:24,859][inference][INFO] - Running inference benchmark -[2023-08-25 12:58:25,060][inference][INFO] - + Tracking forward pass peak memory -[2023-08-25 12:58:25,111][inference][INFO] - + Forward pass peak memory: 469.143552 (MB) -[2023-08-25 12:58:25,113][inference][INFO] - + Warming up the forward pass -[2023-08-25 12:58:25,148][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-25 12:58:30,201][inference][INFO] - + Forward pass latency: 3.48e-03 (s) -[2023-08-25 12:58:30,203][inference][INFO] - + Forward pass throughput: 287.00 (samples/s) -[2023-08-25 12:58:30,204][inference][INFO] - + Warming up the generation pass -[2023-08-25 12:58:30,700][inference][INFO] - + Tracking generation latency and throughput -[2023-08-25 12:58:35,846][inference][INFO] - + Generation pass latency: 5.14e-01 (s) -[2023-08-25 12:58:35,847][inference][INFO] - + Generation pass throughput: 195.00 (tokens/s) -[2023-08-25 12:58:35,847][inference][INFO] - Saving inference results -[2023-08-25 12:58:35,863][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-25_11:35:40_c6a84b72025fa7795f7fb5c97e3de7861a4dfb01/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-25_11:35:40_c6a84b72025fa7795f7fb5c97e3de7861a4dfb01/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_11:35:40_c6a84b72025fa7795f7fb5c97e3de7861a4dfb01/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_11:35:40_c6a84b72025fa7795f7fb5c97e3de7861a4dfb01/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-25_11:35:40_c6a84b72025fa7795f7fb5c97e3de7861a4dfb01/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 95abfe8c8714c1fcfb8a682573561bb1f882dff3..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_11:35:40_c6a84b72025fa7795f7fb5c97e3de7861a4dfb01/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-25_11:35:40_c6a84b72025fa7795f7fb5c97e3de7861a4dfb01/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-25_11:35:40_c6a84b72025fa7795f7fb5c97e3de7861a4dfb01/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-25_11:35:40_c6a84b72025fa7795f7fb5c97e3de7861a4dfb01/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_11:35:40_c6a84b72025fa7795f7fb5c97e3de7861a4dfb01/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-25_11:35:40_c6a84b72025fa7795f7fb5c97e3de7861a4dfb01/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-25_11:35:40_c6a84b72025fa7795f7fb5c97e3de7861a4dfb01/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_11:35:40_c6a84b72025fa7795f7fb5c97e3de7861a4dfb01/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_11:35:40_c6a84b72025fa7795f7fb5c97e3de7861a4dfb01/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-25_11:35:40_c6a84b72025fa7795f7fb5c97e3de7861a4dfb01/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 9ba48c7b734f51cd85824a2e3ec4407f4377e7f4..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_11:35:40_c6a84b72025fa7795f7fb5c97e3de7861a4dfb01/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.59936,0.00357,280.0 diff --git a/raw_results/2023-08-25_11:35:40_c6a84b72025fa7795f7fb5c97e3de7861a4dfb01/pytorch_bert_inference/0/main.log b/raw_results/2023-08-25_11:35:40_c6a84b72025fa7795f7fb5c97e3de7861a4dfb01/pytorch_bert_inference/0/main.log deleted file mode 100644 index 71d36421327426b5597308dcd86ea394bb285b6b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_11:35:40_c6a84b72025fa7795f7fb5c97e3de7861a4dfb01/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-25 12:59:42,749][benchmark][INFO] - Configuring inference benchmark -[2023-08-25 12:59:42,749][benchmark][INFO] - + Setting seed(42) -[2023-08-25 12:59:44,036][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-25 12:59:44,036][backend][INFO] - Configuring pytorch backend -[2023-08-25 12:59:44,037][backend][INFO] - + Checking initial device isolation -[2023-08-25 12:59:44,037][backend][INFO] - + Checking contineous device isolation -[2023-08-25 12:59:44,037][pytorch][INFO] - + Disabling gradients -[2023-08-25 12:59:44,037][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-25 12:59:44,642][pytorch][INFO] - + Turning on eval mode -[2023-08-25 12:59:44,643][inference][INFO] - Running inference benchmark -[2023-08-25 12:59:44,764][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-25 12:59:44,765][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-25 12:59:44,826][inference][INFO] - + Forward pass peak memory: 467.59936 (MB) -[2023-08-25 12:59:44,827][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-25 12:59:44,829][inference][INFO] - + Warming up the forward pass -[2023-08-25 12:59:44,865][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-25 12:59:49,914][inference][INFO] - + Forward pass latency: 3.57e-03 (s) -[2023-08-25 12:59:49,916][inference][INFO] - + Forward pass throughput: 280.00 (samples/s) -[2023-08-25 12:59:49,916][inference][INFO] - Saving inference results -[2023-08-25 12:59:49,927][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-25_11:35:40_c6a84b72025fa7795f7fb5c97e3de7861a4dfb01/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-25_11:35:40_c6a84b72025fa7795f7fb5c97e3de7861a4dfb01/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_11:35:40_c6a84b72025fa7795f7fb5c97e3de7861a4dfb01/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_11:35:40_c6a84b72025fa7795f7fb5c97e3de7861a4dfb01/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-25_11:35:40_c6a84b72025fa7795f7fb5c97e3de7861a4dfb01/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 36a45fc428ec9028d0eff3533072d667dd1d85d7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_11:35:40_c6a84b72025fa7795f7fb5c97e3de7861a4dfb01/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-25_11:35:40_c6a84b72025fa7795f7fb5c97e3de7861a4dfb01/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-25_11:35:40_c6a84b72025fa7795f7fb5c97e3de7861a4dfb01/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-08-25_11:35:40_c6a84b72025fa7795f7fb5c97e3de7861a4dfb01/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_11:35:40_c6a84b72025fa7795f7fb5c97e3de7861a4dfb01/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-25_11:35:40_c6a84b72025fa7795f7fb5c97e3de7861a4dfb01/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-25_11:35:40_c6a84b72025fa7795f7fb5c97e3de7861a4dfb01/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_11:35:40_c6a84b72025fa7795f7fb5c97e3de7861a4dfb01/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_11:35:40_c6a84b72025fa7795f7fb5c97e3de7861a4dfb01/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-25_11:35:40_c6a84b72025fa7795f7fb5c97e3de7861a4dfb01/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 3f1b5edd6e37a5a2f86deb2ea7f99a6bb3fca6e8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_11:35:40_c6a84b72025fa7795f7fb5c97e3de7861a4dfb01/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.66432,0.00446,897.0 diff --git a/raw_results/2023-08-25_11:35:40_c6a84b72025fa7795f7fb5c97e3de7861a4dfb01/pytorch_bert_inference/1/main.log b/raw_results/2023-08-25_11:35:40_c6a84b72025fa7795f7fb5c97e3de7861a4dfb01/pytorch_bert_inference/1/main.log deleted file mode 100644 index 53cb116a1f05f61e5516d2e0281aa634389a66b9..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-25_11:35:40_c6a84b72025fa7795f7fb5c97e3de7861a4dfb01/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-25 12:59:50,324][benchmark][INFO] - Configuring inference benchmark -[2023-08-25 12:59:50,325][benchmark][INFO] - + Setting seed(42) -[2023-08-25 12:59:50,782][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-25 12:59:50,782][backend][INFO] - Configuring pytorch backend -[2023-08-25 12:59:50,783][backend][INFO] - + Checking initial device isolation -[2023-08-25 12:59:50,783][backend][INFO] - + Checking contineous device isolation -[2023-08-25 12:59:50,783][pytorch][INFO] - + Disabling gradients -[2023-08-25 12:59:50,783][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-25 12:59:50,896][pytorch][INFO] - + Turning on eval mode -[2023-08-25 12:59:50,896][inference][INFO] - Running inference benchmark -[2023-08-25 12:59:51,018][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-25 12:59:51,020][inference][INFO] - + Tracking forward pass peak memory -[2023-08-25 12:59:51,059][inference][INFO] - + Forward pass peak memory: 468.66432 (MB) -[2023-08-25 12:59:51,060][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-25 12:59:51,062][inference][INFO] - + Warming up the forward pass -[2023-08-25 12:59:51,104][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-25 12:59:56,151][inference][INFO] - + Forward pass latency: 4.46e-03 (s) -[2023-08-25 12:59:56,152][inference][INFO] - + Forward pass throughput: 897.00 (samples/s) -[2023-08-25 12:59:56,153][inference][INFO] - Saving inference results -[2023-08-25 12:59:56,160][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-25_11:35:40_c6a84b72025fa7795f7fb5c97e3de7861a4dfb01/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-25_11:35:40_c6a84b72025fa7795f7fb5c97e3de7861a4dfb01/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_11:35:40_c6a84b72025fa7795f7fb5c97e3de7861a4dfb01/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: 
hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_11:35:40_c6a84b72025fa7795f7fb5c97e3de7861a4dfb01/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-25_11:35:40_c6a84b72025fa7795f7fb5c97e3de7861a4dfb01/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 9847c55e6eef8b4548bb3bb0e6f23a4f9fb48711..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_11:35:40_c6a84b72025fa7795f7fb5c97e3de7861a4dfb01/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-25_11:35:40_c6a84b72025fa7795f7fb5c97e3de7861a4dfb01/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-25_11:35:40_c6a84b72025fa7795f7fb5c97e3de7861a4dfb01/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-25_11:35:40_c6a84b72025fa7795f7fb5c97e3de7861a4dfb01/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_11:35:40_c6a84b72025fa7795f7fb5c97e3de7861a4dfb01/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-25_11:35:40_c6a84b72025fa7795f7fb5c97e3de7861a4dfb01/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-25_11:35:40_c6a84b72025fa7795f7fb5c97e3de7861a4dfb01/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_11:35:40_c6a84b72025fa7795f7fb5c97e3de7861a4dfb01/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_11:35:40_c6a84b72025fa7795f7fb5c97e3de7861a4dfb01/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-25_11:35:40_c6a84b72025fa7795f7fb5c97e3de7861a4dfb01/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 60cb7d6fd961fdb04e63c285250e77ebfc56828e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_11:35:40_c6a84b72025fa7795f7fb5c97e3de7861a4dfb01/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.67193599999996,0.00326,307.0,0.757,132.0 diff --git a/raw_results/2023-08-25_11:35:40_c6a84b72025fa7795f7fb5c97e3de7861a4dfb01/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-25_11:35:40_c6a84b72025fa7795f7fb5c97e3de7861a4dfb01/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 260e7130ca93937f7ac41223acb09dc6cc14c734..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_11:35:40_c6a84b72025fa7795f7fb5c97e3de7861a4dfb01/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-25 13:00:01,599][benchmark][INFO] - Configuring inference benchmark -[2023-08-25 13:00:01,599][benchmark][INFO] - + Setting seed(42) -[2023-08-25 13:00:03,118][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-25 13:00:03,118][backend][INFO] - Configuring pytorch backend -[2023-08-25 13:00:03,119][backend][INFO] - + Checking initial device isolation -[2023-08-25 13:00:03,119][backend][INFO] - + Checking contineous device isolation -[2023-08-25 13:00:03,119][pytorch][INFO] - + Disabling gradients -[2023-08-25 13:00:03,119][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-25 13:00:03,824][pytorch][INFO] - + Turning on eval mode -[2023-08-25 13:00:03,825][inference][INFO] - Running inference benchmark -[2023-08-25 13:00:04,038][inference][INFO] - + Tracking forward pass peak memory -[2023-08-25 13:00:04,089][inference][INFO] - + Forward pass peak memory: 469.67193599999996 (MB) -[2023-08-25 13:00:04,090][inference][INFO] - + Warming up the 
forward pass -[2023-08-25 13:00:04,123][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-25 13:00:09,181][inference][INFO] - + Forward pass latency: 3.26e-03 (s) -[2023-08-25 13:00:09,183][inference][INFO] - + Forward pass throughput: 307.00 (samples/s) -[2023-08-25 13:00:09,184][inference][INFO] - + Warming up the generation pass -[2023-08-25 13:00:09,674][inference][INFO] - + Tracking generation latency and throughput -[2023-08-25 13:00:14,978][inference][INFO] - + Generation pass latency: 7.57e-01 (s) -[2023-08-25 13:00:14,979][inference][INFO] - + Generation pass throughput: 132.00 (tokens/s) -[2023-08-25 13:00:14,979][inference][INFO] - Saving inference results -[2023-08-25 13:00:14,999][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-25_11:42:06_494e96d8d61277cd7509e5f90aa14e6ac604063a/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-25_11:42:06_494e96d8d61277cd7509e5f90aa14e6ac604063a/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_11:42:06_494e96d8d61277cd7509e5f90aa14e6ac604063a/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_11:42:06_494e96d8d61277cd7509e5f90aa14e6ac604063a/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-25_11:42:06_494e96d8d61277cd7509e5f90aa14e6ac604063a/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 891ef20ff72b9984f6d00db3298ac7b54c4bc603..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_11:42:06_494e96d8d61277cd7509e5f90aa14e6ac604063a/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-25_11:42:06_494e96d8d61277cd7509e5f90aa14e6ac604063a/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-25_11:42:06_494e96d8d61277cd7509e5f90aa14e6ac604063a/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-25_11:42:06_494e96d8d61277cd7509e5f90aa14e6ac604063a/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_11:42:06_494e96d8d61277cd7509e5f90aa14e6ac604063a/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-25_11:42:06_494e96d8d61277cd7509e5f90aa14e6ac604063a/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-25_11:42:06_494e96d8d61277cd7509e5f90aa14e6ac604063a/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_11:42:06_494e96d8d61277cd7509e5f90aa14e6ac604063a/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_11:42:06_494e96d8d61277cd7509e5f90aa14e6ac604063a/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-25_11:42:06_494e96d8d61277cd7509e5f90aa14e6ac604063a/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 48a88e9525a1cb1f99d26edd28ed6d67f7ece52f..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_11:42:06_494e96d8d61277cd7509e5f90aa14e6ac604063a/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.202048,0.00365,274.0 diff --git a/raw_results/2023-08-25_11:42:06_494e96d8d61277cd7509e5f90aa14e6ac604063a/pytorch_bert_inference/0/main.log b/raw_results/2023-08-25_11:42:06_494e96d8d61277cd7509e5f90aa14e6ac604063a/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
c398a959ca8345657d6f9e93a0ce8edf0b1429ae..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_11:42:06_494e96d8d61277cd7509e5f90aa14e6ac604063a/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-25 13:01:20,200][benchmark][INFO] - Configuring inference benchmark -[2023-08-25 13:01:20,201][benchmark][INFO] - + Setting seed(42) -[2023-08-25 13:01:21,433][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-25 13:01:21,433][backend][INFO] - Configuring pytorch backend -[2023-08-25 13:01:21,433][backend][INFO] - + Checking initial device isolation -[2023-08-25 13:01:21,433][backend][INFO] - + Checking contineous device isolation -[2023-08-25 13:01:21,434][pytorch][INFO] - + Disabling gradients -[2023-08-25 13:01:21,434][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-25 13:01:22,026][pytorch][INFO] - + Turning on eval mode -[2023-08-25 13:01:22,027][inference][INFO] - Running inference benchmark -[2023-08-25 13:01:22,152][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-25 13:01:22,154][inference][INFO] - + Tracking forward pass peak memory -[2023-08-25 13:01:22,216][inference][INFO] - + Forward pass peak memory: 467.202048 (MB) -[2023-08-25 13:01:22,217][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-25 13:01:22,219][inference][INFO] - + Warming up the forward pass -[2023-08-25 13:01:22,257][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-25 13:01:27,304][inference][INFO] - + Forward pass latency: 3.65e-03 (s) -[2023-08-25 13:01:27,305][inference][INFO] - + Forward pass throughput: 274.00 (samples/s) -[2023-08-25 13:01:27,305][inference][INFO] - Saving inference results -[2023-08-25 13:01:27,315][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-25_11:42:06_494e96d8d61277cd7509e5f90aa14e6ac604063a/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-25_11:42:06_494e96d8d61277cd7509e5f90aa14e6ac604063a/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_11:42:06_494e96d8d61277cd7509e5f90aa14e6ac604063a/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_11:42:06_494e96d8d61277cd7509e5f90aa14e6ac604063a/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-25_11:42:06_494e96d8d61277cd7509e5f90aa14e6ac604063a/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 33f24b057b259f8eb9a68f2a36d9dba5d57ca534..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_11:42:06_494e96d8d61277cd7509e5f90aa14e6ac604063a/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
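Aside: each run above writes a one-row inference_results.csv whose first, unnamed column is the row index. A minimal loading sketch, assuming pandas; the path is one of the deleted files shown earlier, and the variable names are illustrative only:

    import pandas as pd

    # One-row CSV; the leading unnamed column is the row index.
    df = pd.read_csv(
        "raw_results/2023-08-25_11:42:06_494e96d8d61277cd7509e5f90aa14e6ac604063a/"
        "pytorch_bert_inference/0/inference_results.csv",
        index_col=0,
    )
    latency = df["forward.latency(s)"].iloc[0]                 # 0.00365 in the snapshot above
    throughput = df["forward.throughput(samples/s)"].iloc[0]   # 274.0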
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-25_11:42:06_494e96d8d61277cd7509e5f90aa14e6ac604063a/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-25_11:42:06_494e96d8d61277cd7509e5f90aa14e6ac604063a/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-25_11:42:06_494e96d8d61277cd7509e5f90aa14e6ac604063a/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_11:42:06_494e96d8d61277cd7509e5f90aa14e6ac604063a/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-25_11:42:06_494e96d8d61277cd7509e5f90aa14e6ac604063a/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-25_11:42:06_494e96d8d61277cd7509e5f90aa14e6ac604063a/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_11:42:06_494e96d8d61277cd7509e5f90aa14e6ac604063a/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_11:42:06_494e96d8d61277cd7509e5f90aa14e6ac604063a/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-25_11:42:06_494e96d8d61277cd7509e5f90aa14e6ac604063a/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 9d9d2d57e52624b822d1742a95f11daa88c54e40..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_11:42:06_494e96d8d61277cd7509e5f90aa14e6ac604063a/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.230144,0.00413,969.0 diff --git a/raw_results/2023-08-25_11:42:06_494e96d8d61277cd7509e5f90aa14e6ac604063a/pytorch_bert_inference/1/main.log b/raw_results/2023-08-25_11:42:06_494e96d8d61277cd7509e5f90aa14e6ac604063a/pytorch_bert_inference/1/main.log deleted file mode 100644 index b30cfd4c8b18fdf592a84a341c3c6100db49c10c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_11:42:06_494e96d8d61277cd7509e5f90aa14e6ac604063a/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-25 13:01:27,679][benchmark][INFO] - Configuring inference benchmark -[2023-08-25 13:01:27,680][benchmark][INFO] - + Setting seed(42) -[2023-08-25 13:01:28,124][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-25 13:01:28,125][backend][INFO] - Configuring pytorch backend -[2023-08-25 13:01:28,125][backend][INFO] - + Checking initial device isolation -[2023-08-25 13:01:28,125][backend][INFO] - + Checking contineous device isolation -[2023-08-25 13:01:28,125][pytorch][INFO] - + Disabling gradients -[2023-08-25 13:01:28,125][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-25 13:01:28,241][pytorch][INFO] - + Turning on eval mode -[2023-08-25 13:01:28,241][inference][INFO] - Running inference benchmark -[2023-08-25 13:01:28,363][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-25 13:01:28,364][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-25 13:01:28,405][inference][INFO] - + Forward pass peak memory: 468.230144 (MB) -[2023-08-25 13:01:28,406][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-25 13:01:28,407][inference][INFO] - + Warming up the forward pass -[2023-08-25 13:01:28,449][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-25 13:01:33,495][inference][INFO] - + Forward pass latency: 4.13e-03 (s) -[2023-08-25 13:01:33,497][inference][INFO] - + Forward pass throughput: 969.00 (samples/s) -[2023-08-25 13:01:33,497][inference][INFO] - Saving inference results -[2023-08-25 13:01:33,506][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-25_11:42:06_494e96d8d61277cd7509e5f90aa14e6ac604063a/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-25_11:42:06_494e96d8d61277cd7509e5f90aa14e6ac604063a/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_11:42:06_494e96d8d61277cd7509e5f90aa14e6ac604063a/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_11:42:06_494e96d8d61277cd7509e5f90aa14e6ac604063a/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-25_11:42:06_494e96d8d61277cd7509e5f90aa14e6ac604063a/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 2242265f9755a3c51dc61e95390f71a27976636d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_11:42:06_494e96d8d61277cd7509e5f90aa14e6ac604063a/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-25_11:42:06_494e96d8d61277cd7509e5f90aa14e6ac604063a/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-25_11:42:06_494e96d8d61277cd7509e5f90aa14e6ac604063a/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-25_11:42:06_494e96d8d61277cd7509e5f90aa14e6ac604063a/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_11:42:06_494e96d8d61277cd7509e5f90aa14e6ac604063a/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-25_11:42:06_494e96d8d61277cd7509e5f90aa14e6ac604063a/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-25_11:42:06_494e96d8d61277cd7509e5f90aa14e6ac604063a/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_11:42:06_494e96d8d61277cd7509e5f90aa14e6ac604063a/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_11:42:06_494e96d8d61277cd7509e5f90aa14e6ac604063a/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-25_11:42:06_494e96d8d61277cd7509e5f90aa14e6ac604063a/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 5eb992b5dfa425b1d8ceda1447582b4cf0d27f92..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_11:42:06_494e96d8d61277cd7509e5f90aa14e6ac604063a/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.143552,0.00388,258.0,0.599,167.0 diff --git a/raw_results/2023-08-25_11:42:06_494e96d8d61277cd7509e5f90aa14e6ac604063a/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-25_11:42:06_494e96d8d61277cd7509e5f90aa14e6ac604063a/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 0282cc3891107b230c057ad20755c1a3c867991e..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-25_11:42:06_494e96d8d61277cd7509e5f90aa14e6ac604063a/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-25 13:01:38,476][benchmark][INFO] - Configuring inference benchmark -[2023-08-25 13:01:38,476][benchmark][INFO] - + Setting seed(42) -[2023-08-25 13:01:39,893][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-25 13:01:39,893][backend][INFO] - Configuring pytorch backend -[2023-08-25 13:01:39,894][backend][INFO] - + Checking initial device isolation -[2023-08-25 13:01:39,894][backend][INFO] - + Checking contineous device isolation -[2023-08-25 13:01:39,894][pytorch][INFO] - + Disabling gradients -[2023-08-25 13:01:39,894][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-25 13:01:40,531][pytorch][INFO] - + Turning on eval mode -[2023-08-25 13:01:40,532][inference][INFO] - Running inference benchmark -[2023-08-25 13:01:40,727][inference][INFO] - + Tracking forward pass peak memory -[2023-08-25 13:01:40,773][inference][INFO] - + Forward pass peak memory: 469.143552 (MB) -[2023-08-25 13:01:40,774][inference][INFO] - + Warming up the forward pass -[2023-08-25 13:01:40,811][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-25 13:01:45,854][inference][INFO] - + Forward pass latency: 3.88e-03 (s) -[2023-08-25 13:01:45,855][inference][INFO] - + Forward pass throughput: 258.00 (samples/s) -[2023-08-25 13:01:45,856][inference][INFO] - + Warming up the generation pass -[2023-08-25 13:01:46,444][inference][INFO] - + Tracking generation latency and throughput -[2023-08-25 13:01:51,836][inference][INFO] - + Generation pass latency: 5.99e-01 (s) -[2023-08-25 13:01:51,838][inference][INFO] - + Generation pass throughput: 167.00 (tokens/s) -[2023-08-25 13:01:51,838][inference][INFO] - Saving inference results -[2023-08-25 13:01:51,849][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-25_12:30:39_0770ce6cfbcd8334084f9f2c4302e8c71ac931ee/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-25_12:30:39_0770ce6cfbcd8334084f9f2c4302e8c71ac931ee/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_12:30:39_0770ce6cfbcd8334084f9f2c4302e8c71ac931ee/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_12:30:39_0770ce6cfbcd8334084f9f2c4302e8c71ac931ee/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-25_12:30:39_0770ce6cfbcd8334084f9f2c4302e8c71ac931ee/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index c17426cf517adff6ace9a4696f4a2143d7102c43..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_12:30:39_0770ce6cfbcd8334084f9f2c4302e8c71ac931ee/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
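Aside: the hydra.yaml snapshots here record a MULTIRUN launch in which the basic sweeper expands benchmark.input_shapes.batch_size over 1,4 into job subdirectories 0 and 1. A sketch of what that spec expands to; the real logic lives in hydra._internal.core_plugins.basic_sweeper and also handles cross-products of several swept keys, which this toy version does not:

    # From hydra.sweeper.params in the snapshots above.
    key, values = "benchmark.input_shapes.batch_size", "1,4"

    jobs = [
        {"num": num, "overrides": [f"{key}={v}"]}
        for num, v in enumerate(values.split(","))
    ]

    # Matches the per-job snapshots: subdir 0 ran batch_size=1, subdir 1 ran
    # batch_size=4, and each overrides.yaml holds exactly that one entry.
    assert jobs[1]["overrides"] == ["benchmark.input_shapes.batch_size=4"]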
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-25_12:30:39_0770ce6cfbcd8334084f9f2c4302e8c71ac931ee/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-25_12:30:39_0770ce6cfbcd8334084f9f2c4302e8c71ac931ee/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-25_12:30:39_0770ce6cfbcd8334084f9f2c4302e8c71ac931ee/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_12:30:39_0770ce6cfbcd8334084f9f2c4302e8c71ac931ee/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-25_12:30:39_0770ce6cfbcd8334084f9f2c4302e8c71ac931ee/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-25_12:30:39_0770ce6cfbcd8334084f9f2c4302e8c71ac931ee/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_12:30:39_0770ce6cfbcd8334084f9f2c4302e8c71ac931ee/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_12:30:39_0770ce6cfbcd8334084f9f2c4302e8c71ac931ee/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-25_12:30:39_0770ce6cfbcd8334084f9f2c4302e8c71ac931ee/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 3b6b09aec582765dc3da9524df512d836882c45f..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_12:30:39_0770ce6cfbcd8334084f9f2c4302e8c71ac931ee/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.644992,0.00316,316.0 diff --git a/raw_results/2023-08-25_12:30:39_0770ce6cfbcd8334084f9f2c4302e8c71ac931ee/pytorch_bert_inference/0/main.log b/raw_results/2023-08-25_12:30:39_0770ce6cfbcd8334084f9f2c4302e8c71ac931ee/pytorch_bert_inference/0/main.log deleted file mode 100644 index 062fc3917e61ce4d64989b7415fc9e05824dce32..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_12:30:39_0770ce6cfbcd8334084f9f2c4302e8c71ac931ee/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-25 13:02:58,874][benchmark][INFO] - Configuring inference benchmark -[2023-08-25 13:02:58,875][benchmark][INFO] - + Setting seed(42) -[2023-08-25 13:03:00,234][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-25 13:03:00,235][backend][INFO] - Configuring pytorch backend -[2023-08-25 13:03:00,235][backend][INFO] - + Checking initial device isolation -[2023-08-25 13:03:00,235][backend][INFO] - + Checking contineous device isolation -[2023-08-25 13:03:00,235][pytorch][INFO] - + Disabling gradients -[2023-08-25 13:03:00,235][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-25 13:03:00,854][pytorch][INFO] - + Turning on eval mode -[2023-08-25 13:03:00,855][inference][INFO] - Running inference benchmark -[2023-08-25 13:03:00,983][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-25 13:03:00,985][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-25 13:03:01,043][inference][INFO] - + Forward pass peak memory: 466.644992 (MB) -[2023-08-25 13:03:01,044][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-25 13:03:01,046][inference][INFO] - + Warming up the forward pass -[2023-08-25 13:03:01,081][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-25 13:03:06,133][inference][INFO] - + Forward pass latency: 3.16e-03 (s) -[2023-08-25 13:03:06,134][inference][INFO] - + Forward pass throughput: 316.00 (samples/s) -[2023-08-25 13:03:06,134][inference][INFO] - Saving inference results -[2023-08-25 13:03:06,146][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-25_12:30:39_0770ce6cfbcd8334084f9f2c4302e8c71ac931ee/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-25_12:30:39_0770ce6cfbcd8334084f9f2c4302e8c71ac931ee/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_12:30:39_0770ce6cfbcd8334084f9f2c4302e8c71ac931ee/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_12:30:39_0770ce6cfbcd8334084f9f2c4302e8c71ac931ee/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-25_12:30:39_0770ce6cfbcd8334084f9f2c4302e8c71ac931ee/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 6370007c82b75e0e8866b235ad20b3a858f9fb53..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_12:30:39_0770ce6cfbcd8334084f9f2c4302e8c71ac931ee/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-25_12:30:39_0770ce6cfbcd8334084f9f2c4302e8c71ac931ee/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-25_12:30:39_0770ce6cfbcd8334084f9f2c4302e8c71ac931ee/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-08-25_12:30:39_0770ce6cfbcd8334084f9f2c4302e8c71ac931ee/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_12:30:39_0770ce6cfbcd8334084f9f2c4302e8c71ac931ee/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-25_12:30:39_0770ce6cfbcd8334084f9f2c4302e8c71ac931ee/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-25_12:30:39_0770ce6cfbcd8334084f9f2c4302e8c71ac931ee/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_12:30:39_0770ce6cfbcd8334084f9f2c4302e8c71ac931ee/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_12:30:39_0770ce6cfbcd8334084f9f2c4302e8c71ac931ee/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-25_12:30:39_0770ce6cfbcd8334084f9f2c4302e8c71ac931ee/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index fef3c5dd9f06c3c5cb59f5e193d2ddedf698055c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_12:30:39_0770ce6cfbcd8334084f9f2c4302e8c71ac931ee/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.67718399999995,0.00359,1110.0 diff --git a/raw_results/2023-08-25_12:30:39_0770ce6cfbcd8334084f9f2c4302e8c71ac931ee/pytorch_bert_inference/1/main.log b/raw_results/2023-08-25_12:30:39_0770ce6cfbcd8334084f9f2c4302e8c71ac931ee/pytorch_bert_inference/1/main.log deleted file mode 100644 index fb4eefb1f3688324232424e1732ec68d3204d05e..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-25_12:30:39_0770ce6cfbcd8334084f9f2c4302e8c71ac931ee/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-25 13:03:06,517][benchmark][INFO] - Configuring inference benchmark -[2023-08-25 13:03:06,518][benchmark][INFO] - + Setting seed(42) -[2023-08-25 13:03:06,982][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-25 13:03:06,982][backend][INFO] - Configuring pytorch backend -[2023-08-25 13:03:06,982][backend][INFO] - + Checking initial device isolation -[2023-08-25 13:03:06,982][backend][INFO] - + Checking contineous device isolation -[2023-08-25 13:03:06,983][pytorch][INFO] - + Disabling gradients -[2023-08-25 13:03:06,983][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-25 13:03:07,112][pytorch][INFO] - + Turning on eval mode -[2023-08-25 13:03:07,112][inference][INFO] - Running inference benchmark -[2023-08-25 13:03:07,229][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-25 13:03:07,230][inference][INFO] - + Tracking forward pass peak memory -[2023-08-25 13:03:07,268][inference][INFO] - + Forward pass peak memory: 467.67718399999995 (MB) -[2023-08-25 13:03:07,269][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-25 13:03:07,271][inference][INFO] - + Warming up the forward pass -[2023-08-25 13:03:07,306][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-25 13:03:12,352][inference][INFO] - + Forward pass latency: 3.59e-03 (s) -[2023-08-25 13:03:12,353][inference][INFO] - + Forward pass throughput: 1110.00 (samples/s) -[2023-08-25 13:03:12,354][inference][INFO] - Saving inference results -[2023-08-25 13:03:12,361][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-25_12:30:39_0770ce6cfbcd8334084f9f2c4302e8c71ac931ee/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-25_12:30:39_0770ce6cfbcd8334084f9f2c4302e8c71ac931ee/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_12:30:39_0770ce6cfbcd8334084f9f2c4302e8c71ac931ee/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference 
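Aside: the throughput figures in these inference_results.csv files are consistent with throughput = batch_size / forward.latency and, for generation, new_tokens * batch_size / generate.latency (new_tokens: 100 per the configs). A small sanity-check sketch; the helper names and the ~1% slack for the rounding of stored latencies are assumptions, not part of optimum-benchmark:

    def forward_throughput(batch_size: int, latency_s: float) -> float:
        # samples/s processed by a single forward pass
        return batch_size / latency_s

    def generate_throughput(batch_size: int, new_tokens: int, latency_s: float) -> float:
        # tokens/s produced by a single generation pass
        return new_tokens * batch_size / latency_s

    # Values taken from the deleted CSVs above.
    assert abs(forward_throughput(1, 0.00326) - 307.0) < 307.0 * 0.01       # gpt2, batch_size=1
    assert abs(forward_throughput(4, 0.00413) - 969.0) < 969.0 * 0.01       # bert, batch_size=4
    assert abs(generate_throughput(1, 100, 0.757) - 132.0) < 132.0 * 0.01   # gpt2 generation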
-model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_12:30:39_0770ce6cfbcd8334084f9f2c4302e8c71ac931ee/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-25_12:30:39_0770ce6cfbcd8334084f9f2c4302e8c71ac931ee/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 98f7eddb1ee49d592d68e56135af042fc9b0f942..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_12:30:39_0770ce6cfbcd8334084f9f2c4302e8c71ac931ee/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-25_12:30:39_0770ce6cfbcd8334084f9f2c4302e8c71ac931ee/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-25_12:30:39_0770ce6cfbcd8334084f9f2c4302e8c71ac931ee/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-25_12:30:39_0770ce6cfbcd8334084f9f2c4302e8c71ac931ee/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_12:30:39_0770ce6cfbcd8334084f9f2c4302e8c71ac931ee/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-25_12:30:39_0770ce6cfbcd8334084f9f2c4302e8c71ac931ee/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-25_12:30:39_0770ce6cfbcd8334084f9f2c4302e8c71ac931ee/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_12:30:39_0770ce6cfbcd8334084f9f2c4302e8c71ac931ee/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_12:30:39_0770ce6cfbcd8334084f9f2c4302e8c71ac931ee/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-25_12:30:39_0770ce6cfbcd8334084f9f2c4302e8c71ac931ee/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 7ed39e2e220dea1d0610a73c7e213de522e8a061..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_12:30:39_0770ce6cfbcd8334084f9f2c4302e8c71ac931ee/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.417984,0.00444,225.0,0.519,193.0 diff --git a/raw_results/2023-08-25_12:30:39_0770ce6cfbcd8334084f9f2c4302e8c71ac931ee/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-25_12:30:39_0770ce6cfbcd8334084f9f2c4302e8c71ac931ee/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index dd01fec38ca99209ca37f63fdb76ef3abce1f069..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_12:30:39_0770ce6cfbcd8334084f9f2c4302e8c71ac931ee/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-25 13:03:17,268][benchmark][INFO] - Configuring inference benchmark -[2023-08-25 13:03:17,268][benchmark][INFO] - + Setting seed(42) -[2023-08-25 13:03:18,719][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-25 13:03:18,719][backend][INFO] - Configuring pytorch backend -[2023-08-25 13:03:18,719][backend][INFO] - + Checking initial device isolation -[2023-08-25 13:03:18,720][backend][INFO] - + Checking contineous device isolation -[2023-08-25 13:03:18,720][pytorch][INFO] - + Disabling gradients -[2023-08-25 13:03:18,720][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-25 13:03:19,414][pytorch][INFO] - + Turning on eval mode -[2023-08-25 13:03:19,415][inference][INFO] - Running inference benchmark -[2023-08-25 13:03:19,625][inference][INFO] - + Tracking forward pass peak memory -[2023-08-25 13:03:19,679][inference][INFO] - + Forward pass peak memory: 469.417984 (MB) -[2023-08-25 13:03:19,681][inference][INFO] - + Warming up the forward pass 
-[2023-08-25 13:03:19,714][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-25 13:03:24,769][inference][INFO] - + Forward pass latency: 4.44e-03 (s) -[2023-08-25 13:03:24,771][inference][INFO] - + Forward pass throughput: 225.00 (samples/s) -[2023-08-25 13:03:24,771][inference][INFO] - + Warming up the generation pass -[2023-08-25 13:03:25,540][inference][INFO] - + Tracking generation latency and throughput -[2023-08-25 13:03:30,731][inference][INFO] - + Generation pass latency: 5.19e-01 (s) -[2023-08-25 13:03:30,732][inference][INFO] - + Generation pass throughput: 193.00 (tokens/s) -[2023-08-25 13:03:30,732][inference][INFO] - Saving inference results -[2023-08-25 13:03:30,743][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-25_12:41:04_dd8b7d28aec80013ad2b25ead4200eea1a6a767e/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-25_12:41:04_dd8b7d28aec80013ad2b25ead4200eea1a6a767e/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_12:41:04_dd8b7d28aec80013ad2b25ead4200eea1a6a767e/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_12:41:04_dd8b7d28aec80013ad2b25ead4200eea1a6a767e/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-25_12:41:04_dd8b7d28aec80013ad2b25ead4200eea1a6a767e/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index fecc44acac7bbeafeaa277f09495d1830d2835a1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_12:41:04_dd8b7d28aec80013ad2b25ead4200eea1a6a767e/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-25_12:41:04_dd8b7d28aec80013ad2b25ead4200eea1a6a767e/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-25_12:41:04_dd8b7d28aec80013ad2b25ead4200eea1a6a767e/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-25_12:41:04_dd8b7d28aec80013ad2b25ead4200eea1a6a767e/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_12:41:04_dd8b7d28aec80013ad2b25ead4200eea1a6a767e/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-25_12:41:04_dd8b7d28aec80013ad2b25ead4200eea1a6a767e/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-25_12:41:04_dd8b7d28aec80013ad2b25ead4200eea1a6a767e/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_12:41:04_dd8b7d28aec80013ad2b25ead4200eea1a6a767e/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_12:41:04_dd8b7d28aec80013ad2b25ead4200eea1a6a767e/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-25_12:41:04_dd8b7d28aec80013ad2b25ead4200eea1a6a767e/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index fb8b0d4531445e6b5d3990d855f12cfd95043622..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_12:41:04_dd8b7d28aec80013ad2b25ead4200eea1a6a767e/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.255296,0.00573,175.0 diff --git a/raw_results/2023-08-25_12:41:04_dd8b7d28aec80013ad2b25ead4200eea1a6a767e/pytorch_bert_inference/0/main.log b/raw_results/2023-08-25_12:41:04_dd8b7d28aec80013ad2b25ead4200eea1a6a767e/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
c841c59c1e2031430302de3c07129e0a0543dcc6..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_12:41:04_dd8b7d28aec80013ad2b25ead4200eea1a6a767e/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-25 13:04:39,094][benchmark][INFO] - Configuring inference benchmark -[2023-08-25 13:04:39,095][benchmark][INFO] - + Setting seed(42) -[2023-08-25 13:04:40,691][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-25 13:04:40,691][backend][INFO] - Configuring pytorch backend -[2023-08-25 13:04:40,691][backend][INFO] - + Checking initial device isolation -[2023-08-25 13:04:40,691][backend][INFO] - + Checking contineous device isolation -[2023-08-25 13:04:40,691][pytorch][INFO] - + Disabling gradients -[2023-08-25 13:04:40,692][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-25 13:04:41,286][pytorch][INFO] - + Turning on eval mode -[2023-08-25 13:04:41,287][inference][INFO] - Running inference benchmark -[2023-08-25 13:04:41,405][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-25 13:04:41,407][inference][INFO] - + Tracking forward pass peak memory -[2023-08-25 13:04:41,467][inference][INFO] - + Forward pass peak memory: 467.255296 (MB) -[2023-08-25 13:04:41,468][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-25 13:04:41,470][inference][INFO] - + Warming up the forward pass -[2023-08-25 13:04:41,507][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-25 13:04:46,558][inference][INFO] - + Forward pass latency: 5.73e-03 (s) -[2023-08-25 13:04:46,560][inference][INFO] - + Forward pass throughput: 175.00 (samples/s) -[2023-08-25 13:04:46,560][inference][INFO] - Saving inference results -[2023-08-25 13:04:46,576][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-25_12:41:04_dd8b7d28aec80013ad2b25ead4200eea1a6a767e/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-25_12:41:04_dd8b7d28aec80013ad2b25ead4200eea1a6a767e/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_12:41:04_dd8b7d28aec80013ad2b25ead4200eea1a6a767e/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_12:41:04_dd8b7d28aec80013ad2b25ead4200eea1a6a767e/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-25_12:41:04_dd8b7d28aec80013ad2b25ead4200eea1a6a767e/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 680670114978f6787d202758f7e59f2df5aefd57..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_12:41:04_dd8b7d28aec80013ad2b25ead4200eea1a6a767e/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-25_12:41:04_dd8b7d28aec80013ad2b25ead4200eea1a6a767e/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-25_12:41:04_dd8b7d28aec80013ad2b25ead4200eea1a6a767e/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-25_12:41:04_dd8b7d28aec80013ad2b25ead4200eea1a6a767e/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_12:41:04_dd8b7d28aec80013ad2b25ead4200eea1a6a767e/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-25_12:41:04_dd8b7d28aec80013ad2b25ead4200eea1a6a767e/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-25_12:41:04_dd8b7d28aec80013ad2b25ead4200eea1a6a767e/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_12:41:04_dd8b7d28aec80013ad2b25ead4200eea1a6a767e/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_12:41:04_dd8b7d28aec80013ad2b25ead4200eea1a6a767e/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-25_12:41:04_dd8b7d28aec80013ad2b25ead4200eea1a6a767e/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 6246cadbe5f887c924ad5992f9f652f0d563de80..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_12:41:04_dd8b7d28aec80013ad2b25ead4200eea1a6a767e/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.14412799999997,0.00381,1050.0 diff --git a/raw_results/2023-08-25_12:41:04_dd8b7d28aec80013ad2b25ead4200eea1a6a767e/pytorch_bert_inference/1/main.log b/raw_results/2023-08-25_12:41:04_dd8b7d28aec80013ad2b25ead4200eea1a6a767e/pytorch_bert_inference/1/main.log deleted file mode 100644 index bea16a2ddc83c986ab3344f0542b49fe1e7f6944..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_12:41:04_dd8b7d28aec80013ad2b25ead4200eea1a6a767e/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-25 13:04:47,064][benchmark][INFO] - Configuring inference benchmark -[2023-08-25 13:04:47,065][benchmark][INFO] - + Setting seed(42) -[2023-08-25 13:04:47,509][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-25 13:04:47,509][backend][INFO] - Configuring pytorch backend -[2023-08-25 13:04:47,509][backend][INFO] - + Checking initial device isolation -[2023-08-25 13:04:47,510][backend][INFO] - + Checking contineous device isolation -[2023-08-25 13:04:47,510][pytorch][INFO] - + Disabling gradients -[2023-08-25 13:04:47,510][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-25 13:04:47,628][pytorch][INFO] - + Turning on eval mode -[2023-08-25 13:04:47,629][inference][INFO] - Running inference benchmark -[2023-08-25 13:04:47,747][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-25 13:04:47,749][inference][INFO] - + Tracking forward 
pass peak memory -[2023-08-25 13:04:47,790][inference][INFO] - + Forward pass peak memory: 468.14412799999997 (MB) -[2023-08-25 13:04:47,791][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-25 13:04:47,793][inference][INFO] - + Warming up the forward pass -[2023-08-25 13:04:47,828][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-25 13:04:52,869][inference][INFO] - + Forward pass latency: 3.81e-03 (s) -[2023-08-25 13:04:52,871][inference][INFO] - + Forward pass throughput: 1050.00 (samples/s) -[2023-08-25 13:04:52,871][inference][INFO] - Saving inference results -[2023-08-25 13:04:52,878][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-25_12:41:04_dd8b7d28aec80013ad2b25ead4200eea1a6a767e/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-25_12:41:04_dd8b7d28aec80013ad2b25ead4200eea1a6a767e/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_12:41:04_dd8b7d28aec80013ad2b25ead4200eea1a6a767e/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_12:41:04_dd8b7d28aec80013ad2b25ead4200eea1a6a767e/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-25_12:41:04_dd8b7d28aec80013ad2b25ead4200eea1a6a767e/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 94bdbc024489de60eeaa9913a861ece1b2f1bde6..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_12:41:04_dd8b7d28aec80013ad2b25ead4200eea1a6a767e/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - 
launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-25_12:41:04_dd8b7d28aec80013ad2b25ead4200eea1a6a767e/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-25_12:41:04_dd8b7d28aec80013ad2b25ead4200eea1a6a767e/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-25_12:41:04_dd8b7d28aec80013ad2b25ead4200eea1a6a767e/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_12:41:04_dd8b7d28aec80013ad2b25ead4200eea1a6a767e/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-25_12:41:04_dd8b7d28aec80013ad2b25ead4200eea1a6a767e/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-25_12:41:04_dd8b7d28aec80013ad2b25ead4200eea1a6a767e/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_12:41:04_dd8b7d28aec80013ad2b25ead4200eea1a6a767e/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_12:41:04_dd8b7d28aec80013ad2b25ead4200eea1a6a767e/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-25_12:41:04_dd8b7d28aec80013ad2b25ead4200eea1a6a767e/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 403aea54a1df23973250780c87d41a8ad94c45f2..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_12:41:04_dd8b7d28aec80013ad2b25ead4200eea1a6a767e/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,468.959232,0.00343,292.0,1.01,99.0 diff --git a/raw_results/2023-08-25_12:41:04_dd8b7d28aec80013ad2b25ead4200eea1a6a767e/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-25_12:41:04_dd8b7d28aec80013ad2b25ead4200eea1a6a767e/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index efe3188d6e378778c223af3f6575123db3a633b8..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-25_12:41:04_dd8b7d28aec80013ad2b25ead4200eea1a6a767e/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-25 13:04:58,602][benchmark][INFO] - Configuring inference benchmark -[2023-08-25 13:04:58,603][benchmark][INFO] - + Setting seed(42) -[2023-08-25 13:05:00,067][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-25 13:05:00,067][backend][INFO] - Configuring pytorch backend -[2023-08-25 13:05:00,068][backend][INFO] - + Checking initial device isolation -[2023-08-25 13:05:00,068][backend][INFO] - + Checking contineous device isolation -[2023-08-25 13:05:00,068][pytorch][INFO] - + Disabling gradients -[2023-08-25 13:05:00,068][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-25 13:05:00,767][pytorch][INFO] - + Turning on eval mode -[2023-08-25 13:05:00,768][inference][INFO] - Running inference benchmark -[2023-08-25 13:05:00,976][inference][INFO] - + Tracking forward pass peak memory -[2023-08-25 13:05:01,028][inference][INFO] - + Forward pass peak memory: 468.959232 (MB) -[2023-08-25 13:05:01,029][inference][INFO] - + Warming up the forward pass -[2023-08-25 13:05:01,066][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-25 13:05:06,149][inference][INFO] - + Forward pass latency: 3.43e-03 (s) -[2023-08-25 13:05:06,151][inference][INFO] - + Forward pass throughput: 292.00 (samples/s) -[2023-08-25 13:05:06,154][inference][INFO] - + Warming up the generation pass -[2023-08-25 13:05:06,900][inference][INFO] - + Tracking generation latency and throughput -[2023-08-25 13:05:11,953][inference][INFO] - + Generation pass latency: 1.01e+00 (s) -[2023-08-25 13:05:11,954][inference][INFO] - + Generation pass throughput: 99.00 (tokens/s) -[2023-08-25 13:05:11,954][inference][INFO] - Saving inference results -[2023-08-25 13:05:11,970][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-25_13:36:41_35c570c80edb9f56aa8339c03d3975847a85cb9d/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-25_13:36:41_35c570c80edb9f56aa8339c03d3975847a85cb9d/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_13:36:41_35c570c80edb9f56aa8339c03d3975847a85cb9d/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 
16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_13:36:41_35c570c80edb9f56aa8339c03d3975847a85cb9d/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-25_13:36:41_35c570c80edb9f56aa8339c03d3975847a85cb9d/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index c1c7d48ff38590964ade4b83942ac5911a0981aa..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_13:36:41_35c570c80edb9f56aa8339c03d3975847a85cb9d/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-25_13:36:41_35c570c80edb9f56aa8339c03d3975847a85cb9d/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-25_13:36:41_35c570c80edb9f56aa8339c03d3975847a85cb9d/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-25_13:36:41_35c570c80edb9f56aa8339c03d3975847a85cb9d/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_13:36:41_35c570c80edb9f56aa8339c03d3975847a85cb9d/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-25_13:36:41_35c570c80edb9f56aa8339c03d3975847a85cb9d/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-25_13:36:41_35c570c80edb9f56aa8339c03d3975847a85cb9d/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_13:36:41_35c570c80edb9f56aa8339c03d3975847a85cb9d/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_13:36:41_35c570c80edb9f56aa8339c03d3975847a85cb9d/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-25_13:36:41_35c570c80edb9f56aa8339c03d3975847a85cb9d/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 89e885e741a384bf10b7e2830c163894e5312fc9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_13:36:41_35c570c80edb9f56aa8339c03d3975847a85cb9d/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.19737599999996,0.00358,279.0 diff --git a/raw_results/2023-08-25_13:36:41_35c570c80edb9f56aa8339c03d3975847a85cb9d/pytorch_bert_inference/0/main.log b/raw_results/2023-08-25_13:36:41_35c570c80edb9f56aa8339c03d3975847a85cb9d/pytorch_bert_inference/0/main.log deleted file mode 100644 index fc4f8f8ceff9a1ce65fce4c28ed4ab8cc15444ee..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_13:36:41_35c570c80edb9f56aa8339c03d3975847a85cb9d/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-25 14:49:44,049][benchmark][INFO] - Configuring inference benchmark -[2023-08-25 14:49:44,051][benchmark][INFO] - + Setting seed(42) -[2023-08-25 14:49:45,469][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-25 14:49:45,469][backend][INFO] - Configuring pytorch backend -[2023-08-25 14:49:45,469][backend][INFO] - + Checking initial device isolation -[2023-08-25 14:49:45,470][backend][INFO] - + Checking contineous device isolation -[2023-08-25 14:49:45,470][pytorch][INFO] - + Disabling gradients -[2023-08-25 14:49:45,470][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-25 14:49:46,166][pytorch][INFO] - + Turning on eval mode -[2023-08-25 14:49:46,167][inference][INFO] - Running inference benchmark -[2023-08-25 14:49:46,291][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-25 14:49:46,292][inference][INFO] - + Tracking forward 
pass peak memory -[2023-08-25 14:49:46,352][inference][INFO] - + Forward pass peak memory: 468.19737599999996 (MB) -[2023-08-25 14:49:46,354][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-25 14:49:46,355][inference][INFO] - + Warming up the forward pass -[2023-08-25 14:49:46,397][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-25 14:49:51,448][inference][INFO] - + Forward pass latency: 3.58e-03 (s) -[2023-08-25 14:49:51,449][inference][INFO] - + Forward pass throughput: 279.00 (samples/s) -[2023-08-25 14:49:51,450][inference][INFO] - Saving inference results -[2023-08-25 14:49:51,461][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-25_13:36:41_35c570c80edb9f56aa8339c03d3975847a85cb9d/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-25_13:36:41_35c570c80edb9f56aa8339c03d3975847a85cb9d/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_13:36:41_35c570c80edb9f56aa8339c03d3975847a85cb9d/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_13:36:41_35c570c80edb9f56aa8339c03d3975847a85cb9d/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-25_13:36:41_35c570c80edb9f56aa8339c03d3975847a85cb9d/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 2ff3826668ae3d84daad9daee6b459d21f92bd64..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_13:36:41_35c570c80edb9f56aa8339c03d3975847a85cb9d/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} 
- launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-25_13:36:41_35c570c80edb9f56aa8339c03d3975847a85cb9d/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-25_13:36:41_35c570c80edb9f56aa8339c03d3975847a85cb9d/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-25_13:36:41_35c570c80edb9f56aa8339c03d3975847a85cb9d/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_13:36:41_35c570c80edb9f56aa8339c03d3975847a85cb9d/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-25_13:36:41_35c570c80edb9f56aa8339c03d3975847a85cb9d/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-25_13:36:41_35c570c80edb9f56aa8339c03d3975847a85cb9d/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_13:36:41_35c570c80edb9f56aa8339c03d3975847a85cb9d/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_13:36:41_35c570c80edb9f56aa8339c03d3975847a85cb9d/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-25_13:36:41_35c570c80edb9f56aa8339c03d3975847a85cb9d/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 0d183d634bde319328392c812a78ab753cde0f42..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_13:36:41_35c570c80edb9f56aa8339c03d3975847a85cb9d/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,469.241856,0.00395,1010.0 diff --git a/raw_results/2023-08-25_13:36:41_35c570c80edb9f56aa8339c03d3975847a85cb9d/pytorch_bert_inference/1/main.log b/raw_results/2023-08-25_13:36:41_35c570c80edb9f56aa8339c03d3975847a85cb9d/pytorch_bert_inference/1/main.log deleted file mode 100644 index 
9fd0e642ab6c700480d72c3ea754a6a2e8fb7600..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_13:36:41_35c570c80edb9f56aa8339c03d3975847a85cb9d/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-25 14:49:51,839][benchmark][INFO] - Configuring inference benchmark -[2023-08-25 14:49:51,840][benchmark][INFO] - + Setting seed(42) -[2023-08-25 14:49:52,290][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-25 14:49:52,290][backend][INFO] - Configuring pytorch backend -[2023-08-25 14:49:52,291][backend][INFO] - + Checking initial device isolation -[2023-08-25 14:49:52,291][backend][INFO] - + Checking contineous device isolation -[2023-08-25 14:49:52,291][pytorch][INFO] - + Disabling gradients -[2023-08-25 14:49:52,291][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-25 14:49:52,406][pytorch][INFO] - + Turning on eval mode -[2023-08-25 14:49:52,406][inference][INFO] - Running inference benchmark -[2023-08-25 14:49:52,532][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-25 14:49:52,533][inference][INFO] - + Tracking forward pass peak memory -[2023-08-25 14:49:52,572][inference][INFO] - + Forward pass peak memory: 469.241856 (MB) -[2023-08-25 14:49:52,573][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-25 14:49:52,575][inference][INFO] - + Warming up the forward pass -[2023-08-25 14:49:52,616][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-25 14:49:57,671][inference][INFO] - + Forward pass latency: 3.95e-03 (s) -[2023-08-25 14:49:57,674][inference][INFO] - + Forward pass throughput: 1010.00 (samples/s) -[2023-08-25 14:49:57,674][inference][INFO] - Saving inference results -[2023-08-25 14:49:57,685][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-25_13:36:41_35c570c80edb9f56aa8339c03d3975847a85cb9d/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-25_13:36:41_35c570c80edb9f56aa8339c03d3975847a85cb9d/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_13:36:41_35c570c80edb9f56aa8339c03d3975847a85cb9d/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_13:36:41_35c570c80edb9f56aa8339c03d3975847a85cb9d/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-25_13:36:41_35c570c80edb9f56aa8339c03d3975847a85cb9d/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 74bd9c532cee78dab9b9ae804cdb3ef7a08d993a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_13:36:41_35c570c80edb9f56aa8339c03d3975847a85cb9d/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-25_13:36:41_35c570c80edb9f56aa8339c03d3975847a85cb9d/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-25_13:36:41_35c570c80edb9f56aa8339c03d3975847a85cb9d/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-25_13:36:41_35c570c80edb9f56aa8339c03d3975847a85cb9d/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_13:36:41_35c570c80edb9f56aa8339c03d3975847a85cb9d/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-25_13:36:41_35c570c80edb9f56aa8339c03d3975847a85cb9d/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-25_13:36:41_35c570c80edb9f56aa8339c03d3975847a85cb9d/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_13:36:41_35c570c80edb9f56aa8339c03d3975847a85cb9d/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_13:36:41_35c570c80edb9f56aa8339c03d3975847a85cb9d/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-25_13:36:41_35c570c80edb9f56aa8339c03d3975847a85cb9d/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index f6a1f2db2df1db7de8083a74c0b4ff7c3071d4c6..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_13:36:41_35c570c80edb9f56aa8339c03d3975847a85cb9d/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.44255999999996,0.00503,199.0,0.488,205.0 diff --git a/raw_results/2023-08-25_13:36:41_35c570c80edb9f56aa8339c03d3975847a85cb9d/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-25_13:36:41_35c570c80edb9f56aa8339c03d3975847a85cb9d/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index df888cc8f1318349ba44997dedf9e3d6489b2432..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_13:36:41_35c570c80edb9f56aa8339c03d3975847a85cb9d/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-25 14:50:03,207][benchmark][INFO] - Configuring inference benchmark -[2023-08-25 14:50:03,207][benchmark][INFO] - + Setting seed(42) -[2023-08-25 14:50:04,615][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-25 14:50:04,616][backend][INFO] - Configuring pytorch backend -[2023-08-25 14:50:04,616][backend][INFO] - + Checking initial device isolation -[2023-08-25 14:50:04,616][backend][INFO] - + Checking contineous device isolation -[2023-08-25 14:50:04,616][pytorch][INFO] - + Disabling gradients -[2023-08-25 14:50:04,616][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-25 14:50:05,301][pytorch][INFO] - + Turning on eval mode -[2023-08-25 14:50:05,301][inference][INFO] - Running inference benchmark -[2023-08-25 14:50:05,497][inference][INFO] - + Tracking forward pass peak memory -[2023-08-25 14:50:05,541][inference][INFO] - + Forward pass peak memory: 469.44255999999996 (MB) -[2023-08-25 14:50:05,542][inference][INFO] - + Warming up the 
forward pass -[2023-08-25 14:50:05,577][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-25 14:50:10,621][inference][INFO] - + Forward pass latency: 5.03e-03 (s) -[2023-08-25 14:50:10,622][inference][INFO] - + Forward pass throughput: 199.00 (samples/s) -[2023-08-25 14:50:10,623][inference][INFO] - + Warming up the generation pass -[2023-08-25 14:50:11,163][inference][INFO] - + Tracking generation latency and throughput -[2023-08-25 14:50:16,529][inference][INFO] - + Generation pass latency: 4.88e-01 (s) -[2023-08-25 14:50:16,530][inference][INFO] - + Generation pass throughput: 205.00 (tokens/s) -[2023-08-25 14:50:16,530][inference][INFO] - Saving inference results -[2023-08-25 14:50:16,543][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-25_13:56:39_8b0a7bfcdcd51cdf8e7bd9a08ff061fda6bd2b8c/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-25_13:56:39_8b0a7bfcdcd51cdf8e7bd9a08ff061fda6bd2b8c/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_13:56:39_8b0a7bfcdcd51cdf8e7bd9a08ff061fda6bd2b8c/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_13:56:39_8b0a7bfcdcd51cdf8e7bd9a08ff061fda6bd2b8c/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-25_13:56:39_8b0a7bfcdcd51cdf8e7bd9a08ff061fda6bd2b8c/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 17a040b832954682b09607e34939bbe5f0c71ea9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_13:56:39_8b0a7bfcdcd51cdf8e7bd9a08ff061fda6bd2b8c/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-25_13:56:39_8b0a7bfcdcd51cdf8e7bd9a08ff061fda6bd2b8c/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-25_13:56:39_8b0a7bfcdcd51cdf8e7bd9a08ff061fda6bd2b8c/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-25_13:56:39_8b0a7bfcdcd51cdf8e7bd9a08ff061fda6bd2b8c/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_13:56:39_8b0a7bfcdcd51cdf8e7bd9a08ff061fda6bd2b8c/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-25_13:56:39_8b0a7bfcdcd51cdf8e7bd9a08ff061fda6bd2b8c/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-25_13:56:39_8b0a7bfcdcd51cdf8e7bd9a08ff061fda6bd2b8c/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_13:56:39_8b0a7bfcdcd51cdf8e7bd9a08ff061fda6bd2b8c/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_13:56:39_8b0a7bfcdcd51cdf8e7bd9a08ff061fda6bd2b8c/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-25_13:56:39_8b0a7bfcdcd51cdf8e7bd9a08ff061fda6bd2b8c/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index fc145ba90ff497af4382cbd4a45afd0ac7accef6..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_13:56:39_8b0a7bfcdcd51cdf8e7bd9a08ff061fda6bd2b8c/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.06687999999997,0.00339,295.0 diff --git a/raw_results/2023-08-25_13:56:39_8b0a7bfcdcd51cdf8e7bd9a08ff061fda6bd2b8c/pytorch_bert_inference/0/main.log b/raw_results/2023-08-25_13:56:39_8b0a7bfcdcd51cdf8e7bd9a08ff061fda6bd2b8c/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
4b1df133a1fc833a0e3aa7d796dd14493955b550..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_13:56:39_8b0a7bfcdcd51cdf8e7bd9a08ff061fda6bd2b8c/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-25 14:51:22,480][benchmark][INFO] - Configuring inference benchmark -[2023-08-25 14:51:22,480][benchmark][INFO] - + Setting seed(42) -[2023-08-25 14:51:23,685][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-25 14:51:23,685][backend][INFO] - Configuring pytorch backend -[2023-08-25 14:51:23,686][backend][INFO] - + Checking initial device isolation -[2023-08-25 14:51:23,686][backend][INFO] - + Checking contineous device isolation -[2023-08-25 14:51:23,686][pytorch][INFO] - + Disabling gradients -[2023-08-25 14:51:23,686][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-25 14:51:24,273][pytorch][INFO] - + Turning on eval mode -[2023-08-25 14:51:24,274][inference][INFO] - Running inference benchmark -[2023-08-25 14:51:24,390][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-25 14:51:24,391][inference][INFO] - + Tracking forward pass peak memory -[2023-08-25 14:51:24,451][inference][INFO] - + Forward pass peak memory: 467.06687999999997 (MB) -[2023-08-25 14:51:24,453][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-25 14:51:24,454][inference][INFO] - + Warming up the forward pass -[2023-08-25 14:51:24,498][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-25 14:51:29,549][inference][INFO] - + Forward pass latency: 3.39e-03 (s) -[2023-08-25 14:51:29,551][inference][INFO] - + Forward pass throughput: 295.00 (samples/s) -[2023-08-25 14:51:29,551][inference][INFO] - Saving inference results -[2023-08-25 14:51:29,569][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-25_13:56:39_8b0a7bfcdcd51cdf8e7bd9a08ff061fda6bd2b8c/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-25_13:56:39_8b0a7bfcdcd51cdf8e7bd9a08ff061fda6bd2b8c/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_13:56:39_8b0a7bfcdcd51cdf8e7bd9a08ff061fda6bd2b8c/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_13:56:39_8b0a7bfcdcd51cdf8e7bd9a08ff061fda6bd2b8c/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-25_13:56:39_8b0a7bfcdcd51cdf8e7bd9a08ff061fda6bd2b8c/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 030de85a3473609d1b707a39fd26b643a01f43ce..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_13:56:39_8b0a7bfcdcd51cdf8e7bd9a08ff061fda6bd2b8c/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-25_13:56:39_8b0a7bfcdcd51cdf8e7bd9a08ff061fda6bd2b8c/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-25_13:56:39_8b0a7bfcdcd51cdf8e7bd9a08ff061fda6bd2b8c/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-25_13:56:39_8b0a7bfcdcd51cdf8e7bd9a08ff061fda6bd2b8c/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_13:56:39_8b0a7bfcdcd51cdf8e7bd9a08ff061fda6bd2b8c/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-25_13:56:39_8b0a7bfcdcd51cdf8e7bd9a08ff061fda6bd2b8c/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-25_13:56:39_8b0a7bfcdcd51cdf8e7bd9a08ff061fda6bd2b8c/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_13:56:39_8b0a7bfcdcd51cdf8e7bd9a08ff061fda6bd2b8c/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_13:56:39_8b0a7bfcdcd51cdf8e7bd9a08ff061fda6bd2b8c/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-25_13:56:39_8b0a7bfcdcd51cdf8e7bd9a08ff061fda6bd2b8c/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 3f5f3fe959986753a23b4e6eef9f2548367dee7c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_13:56:39_8b0a7bfcdcd51cdf8e7bd9a08ff061fda6bd2b8c/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.115456,0.00357,1120.0 diff --git a/raw_results/2023-08-25_13:56:39_8b0a7bfcdcd51cdf8e7bd9a08ff061fda6bd2b8c/pytorch_bert_inference/1/main.log b/raw_results/2023-08-25_13:56:39_8b0a7bfcdcd51cdf8e7bd9a08ff061fda6bd2b8c/pytorch_bert_inference/1/main.log deleted file mode 100644 index 94576a388b529cc03288dc565db2d7eca845a5a0..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_13:56:39_8b0a7bfcdcd51cdf8e7bd9a08ff061fda6bd2b8c/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-25 14:51:29,974][benchmark][INFO] - Configuring inference benchmark -[2023-08-25 14:51:29,974][benchmark][INFO] - + Setting seed(42) -[2023-08-25 14:51:30,420][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-25 14:51:30,420][backend][INFO] - Configuring pytorch backend -[2023-08-25 14:51:30,420][backend][INFO] - + Checking initial device isolation -[2023-08-25 14:51:30,420][backend][INFO] - + Checking contineous device isolation -[2023-08-25 14:51:30,420][pytorch][INFO] - + Disabling gradients -[2023-08-25 14:51:30,421][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-25 14:51:30,533][pytorch][INFO] - + Turning on eval mode -[2023-08-25 14:51:30,534][inference][INFO] - Running inference benchmark -[2023-08-25 14:51:30,662][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-25 14:51:30,664][inference][INFO] - + Tracking forward pass 
peak memory -[2023-08-25 14:51:30,711][inference][INFO] - + Forward pass peak memory: 468.115456 (MB) -[2023-08-25 14:51:30,712][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-25 14:51:30,714][inference][INFO] - + Warming up the forward pass -[2023-08-25 14:51:30,756][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-25 14:51:35,803][inference][INFO] - + Forward pass latency: 3.57e-03 (s) -[2023-08-25 14:51:35,804][inference][INFO] - + Forward pass throughput: 1120.00 (samples/s) -[2023-08-25 14:51:35,805][inference][INFO] - Saving inference results -[2023-08-25 14:51:35,813][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-25_13:56:39_8b0a7bfcdcd51cdf8e7bd9a08ff061fda6bd2b8c/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-25_13:56:39_8b0a7bfcdcd51cdf8e7bd9a08ff061fda6bd2b8c/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_13:56:39_8b0a7bfcdcd51cdf8e7bd9a08ff061fda6bd2b8c/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_13:56:39_8b0a7bfcdcd51cdf8e7bd9a08ff061fda6bd2b8c/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-25_13:56:39_8b0a7bfcdcd51cdf8e7bd9a08ff061fda6bd2b8c/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 0254ccb5ccbe412c79929f9c00c40479b459a9bc..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_13:56:39_8b0a7bfcdcd51cdf8e7bd9a08ff061fda6bd2b8c/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-25_13:56:39_8b0a7bfcdcd51cdf8e7bd9a08ff061fda6bd2b8c/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-25_13:56:39_8b0a7bfcdcd51cdf8e7bd9a08ff061fda6bd2b8c/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-25_13:56:39_8b0a7bfcdcd51cdf8e7bd9a08ff061fda6bd2b8c/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_13:56:39_8b0a7bfcdcd51cdf8e7bd9a08ff061fda6bd2b8c/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-25_13:56:39_8b0a7bfcdcd51cdf8e7bd9a08ff061fda6bd2b8c/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-25_13:56:39_8b0a7bfcdcd51cdf8e7bd9a08ff061fda6bd2b8c/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_13:56:39_8b0a7bfcdcd51cdf8e7bd9a08ff061fda6bd2b8c/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_13:56:39_8b0a7bfcdcd51cdf8e7bd9a08ff061fda6bd2b8c/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-25_13:56:39_8b0a7bfcdcd51cdf8e7bd9a08ff061fda6bd2b8c/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 04197a96c9bf1562459a4051d3075f5fe28ff8a7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_13:56:39_8b0a7bfcdcd51cdf8e7bd9a08ff061fda6bd2b8c/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.01248,0.00355,282.0,0.482,207.0 diff --git a/raw_results/2023-08-25_13:56:39_8b0a7bfcdcd51cdf8e7bd9a08ff061fda6bd2b8c/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-25_13:56:39_8b0a7bfcdcd51cdf8e7bd9a08ff061fda6bd2b8c/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 3b6e09b8c6443be555a659d3b2ef50f28e590165..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-25_13:56:39_8b0a7bfcdcd51cdf8e7bd9a08ff061fda6bd2b8c/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-25 14:51:40,570][benchmark][INFO] - Configuring inference benchmark -[2023-08-25 14:51:40,571][benchmark][INFO] - + Setting seed(42) -[2023-08-25 14:51:42,470][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-25 14:51:42,471][backend][INFO] - Configuring pytorch backend -[2023-08-25 14:51:42,471][backend][INFO] - + Checking initial device isolation -[2023-08-25 14:51:42,471][backend][INFO] - + Checking contineous device isolation -[2023-08-25 14:51:42,471][pytorch][INFO] - + Disabling gradients -[2023-08-25 14:51:42,471][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-25 14:51:43,123][pytorch][INFO] - + Turning on eval mode -[2023-08-25 14:51:43,124][inference][INFO] - Running inference benchmark -[2023-08-25 14:51:43,331][inference][INFO] - + Tracking forward pass peak memory -[2023-08-25 14:51:43,378][inference][INFO] - + Forward pass peak memory: 469.01248 (MB) -[2023-08-25 14:51:43,379][inference][INFO] - + Warming up the forward pass -[2023-08-25 14:51:43,412][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-25 14:51:48,480][inference][INFO] - + Forward pass latency: 3.55e-03 (s) -[2023-08-25 14:51:48,482][inference][INFO] - + Forward pass throughput: 282.00 (samples/s) -[2023-08-25 14:51:48,483][inference][INFO] - + Warming up the generation pass -[2023-08-25 14:51:48,969][inference][INFO] - + Tracking generation latency and throughput -[2023-08-25 14:51:54,269][inference][INFO] - + Generation pass latency: 4.82e-01 (s) -[2023-08-25 14:51:54,271][inference][INFO] - + Generation pass throughput: 207.00 (tokens/s) -[2023-08-25 14:51:54,271][inference][INFO] - Saving inference results -[2023-08-25 14:51:54,286][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-25_14:12:54_4d9e45f3ef624cab41f605d7439862ce23ca806a/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-25_14:12:54_4d9e45f3ef624cab41f605d7439862ce23ca806a/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_14:12:54_4d9e45f3ef624cab41f605d7439862ce23ca806a/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 
16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_14:12:54_4d9e45f3ef624cab41f605d7439862ce23ca806a/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-25_14:12:54_4d9e45f3ef624cab41f605d7439862ce23ca806a/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 7616be9fff9616dacf092a8d998a6bed50484608..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_14:12:54_4d9e45f3ef624cab41f605d7439862ce23ca806a/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-25_14:12:54_4d9e45f3ef624cab41f605d7439862ce23ca806a/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-25_14:12:54_4d9e45f3ef624cab41f605d7439862ce23ca806a/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-25_14:12:54_4d9e45f3ef624cab41f605d7439862ce23ca806a/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_14:12:54_4d9e45f3ef624cab41f605d7439862ce23ca806a/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-25_14:12:54_4d9e45f3ef624cab41f605d7439862ce23ca806a/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-25_14:12:54_4d9e45f3ef624cab41f605d7439862ce23ca806a/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_14:12:54_4d9e45f3ef624cab41f605d7439862ce23ca806a/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_14:12:54_4d9e45f3ef624cab41f605d7439862ce23ca806a/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-25_14:12:54_4d9e45f3ef624cab41f605d7439862ce23ca806a/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 876c6160fd756baa34e7df94132b27a1f692f401..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_14:12:54_4d9e45f3ef624cab41f605d7439862ce23ca806a/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.378176,0.00313,319.0 diff --git a/raw_results/2023-08-25_14:12:54_4d9e45f3ef624cab41f605d7439862ce23ca806a/pytorch_bert_inference/0/main.log b/raw_results/2023-08-25_14:12:54_4d9e45f3ef624cab41f605d7439862ce23ca806a/pytorch_bert_inference/0/main.log deleted file mode 100644 index 616b08aebb17f22cd240112c2fa06d0332f954dc..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_14:12:54_4d9e45f3ef624cab41f605d7439862ce23ca806a/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-25 14:53:01,056][benchmark][INFO] - Configuring inference benchmark -[2023-08-25 14:53:01,057][benchmark][INFO] - + Setting seed(42) -[2023-08-25 14:53:02,354][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-25 14:53:02,355][backend][INFO] - Configuring pytorch backend -[2023-08-25 14:53:02,355][backend][INFO] - + Checking initial device isolation -[2023-08-25 14:53:02,355][backend][INFO] - + Checking contineous device isolation -[2023-08-25 14:53:02,355][pytorch][INFO] - + Disabling gradients -[2023-08-25 14:53:02,355][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-25 14:53:02,979][pytorch][INFO] - + Turning on eval mode -[2023-08-25 14:53:02,980][inference][INFO] - Running inference benchmark -[2023-08-25 14:53:03,099][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-25 14:53:03,100][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-25 14:53:03,166][inference][INFO] - + Forward pass peak memory: 467.378176 (MB) -[2023-08-25 14:53:03,167][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-25 14:53:03,169][inference][INFO] - + Warming up the forward pass -[2023-08-25 14:53:03,201][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-25 14:53:08,254][inference][INFO] - + Forward pass latency: 3.13e-03 (s) -[2023-08-25 14:53:08,255][inference][INFO] - + Forward pass throughput: 319.00 (samples/s) -[2023-08-25 14:53:08,256][inference][INFO] - Saving inference results -[2023-08-25 14:53:08,267][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-25_14:12:54_4d9e45f3ef624cab41f605d7439862ce23ca806a/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-25_14:12:54_4d9e45f3ef624cab41f605d7439862ce23ca806a/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_14:12:54_4d9e45f3ef624cab41f605d7439862ce23ca806a/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_14:12:54_4d9e45f3ef624cab41f605d7439862ce23ca806a/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-25_14:12:54_4d9e45f3ef624cab41f605d7439862ce23ca806a/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 0ebcfe8731dcb4c5ed6004c5a478a1e7d2013061..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_14:12:54_4d9e45f3ef624cab41f605d7439862ce23ca806a/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-25_14:12:54_4d9e45f3ef624cab41f605d7439862ce23ca806a/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-25_14:12:54_4d9e45f3ef624cab41f605d7439862ce23ca806a/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-08-25_14:12:54_4d9e45f3ef624cab41f605d7439862ce23ca806a/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_14:12:54_4d9e45f3ef624cab41f605d7439862ce23ca806a/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-25_14:12:54_4d9e45f3ef624cab41f605d7439862ce23ca806a/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-25_14:12:54_4d9e45f3ef624cab41f605d7439862ce23ca806a/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_14:12:54_4d9e45f3ef624cab41f605d7439862ce23ca806a/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_14:12:54_4d9e45f3ef624cab41f605d7439862ce23ca806a/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-25_14:12:54_4d9e45f3ef624cab41f605d7439862ce23ca806a/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index f313e91f953e40f6a713480fe9b43b089bfd1715..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_14:12:54_4d9e45f3ef624cab41f605d7439862ce23ca806a/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.47180799999995,0.00487,821.0 diff --git a/raw_results/2023-08-25_14:12:54_4d9e45f3ef624cab41f605d7439862ce23ca806a/pytorch_bert_inference/1/main.log b/raw_results/2023-08-25_14:12:54_4d9e45f3ef624cab41f605d7439862ce23ca806a/pytorch_bert_inference/1/main.log deleted file mode 100644 index 3e64ea969998b677027d2045cfaac808bf7b1e19..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-25_14:12:54_4d9e45f3ef624cab41f605d7439862ce23ca806a/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-25 14:53:08,637][benchmark][INFO] - Configuring inference benchmark -[2023-08-25 14:53:08,638][benchmark][INFO] - + Setting seed(42) -[2023-08-25 14:53:09,110][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-25 14:53:09,110][backend][INFO] - Configuring pytorch backend -[2023-08-25 14:53:09,110][backend][INFO] - + Checking initial device isolation -[2023-08-25 14:53:09,110][backend][INFO] - + Checking contineous device isolation -[2023-08-25 14:53:09,110][pytorch][INFO] - + Disabling gradients -[2023-08-25 14:53:09,110][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-25 14:53:09,225][pytorch][INFO] - + Turning on eval mode -[2023-08-25 14:53:09,226][inference][INFO] - Running inference benchmark -[2023-08-25 14:53:09,360][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-25 14:53:09,361][inference][INFO] - + Tracking forward pass peak memory -[2023-08-25 14:53:09,401][inference][INFO] - + Forward pass peak memory: 468.47180799999995 (MB) -[2023-08-25 14:53:09,402][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-25 14:53:09,404][inference][INFO] - + Warming up the forward pass -[2023-08-25 14:53:09,446][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-25 14:53:14,493][inference][INFO] - + Forward pass latency: 4.87e-03 (s) -[2023-08-25 14:53:14,495][inference][INFO] - + Forward pass throughput: 821.00 (samples/s) -[2023-08-25 14:53:14,495][inference][INFO] - Saving inference results -[2023-08-25 14:53:14,507][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-25_14:12:54_4d9e45f3ef624cab41f605d7439862ce23ca806a/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-25_14:12:54_4d9e45f3ef624cab41f605d7439862ce23ca806a/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_14:12:54_4d9e45f3ef624cab41f605d7439862ce23ca806a/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference 
-model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_14:12:54_4d9e45f3ef624cab41f605d7439862ce23ca806a/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-25_14:12:54_4d9e45f3ef624cab41f605d7439862ce23ca806a/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index d95d531f96d22c76aa9fde3cf587dbe31e2cb2b8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_14:12:54_4d9e45f3ef624cab41f605d7439862ce23ca806a/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-25_14:12:54_4d9e45f3ef624cab41f605d7439862ce23ca806a/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-25_14:12:54_4d9e45f3ef624cab41f605d7439862ce23ca806a/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-25_14:12:54_4d9e45f3ef624cab41f605d7439862ce23ca806a/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_14:12:54_4d9e45f3ef624cab41f605d7439862ce23ca806a/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-25_14:12:54_4d9e45f3ef624cab41f605d7439862ce23ca806a/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-25_14:12:54_4d9e45f3ef624cab41f605d7439862ce23ca806a/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_14:12:54_4d9e45f3ef624cab41f605d7439862ce23ca806a/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_14:12:54_4d9e45f3ef624cab41f605d7439862ce23ca806a/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-25_14:12:54_4d9e45f3ef624cab41f605d7439862ce23ca806a/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 0f8455fcf6dd7beb0ebc615a2d4548b8d60bd194..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_14:12:54_4d9e45f3ef624cab41f605d7439862ce23ca806a/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.086208,0.00394,254.0,0.529,189.0 diff --git a/raw_results/2023-08-25_14:12:54_4d9e45f3ef624cab41f605d7439862ce23ca806a/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-25_14:12:54_4d9e45f3ef624cab41f605d7439862ce23ca806a/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 244560601614ef10e3c641ce22d4740157f472b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_14:12:54_4d9e45f3ef624cab41f605d7439862ce23ca806a/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-25 14:53:19,297][benchmark][INFO] - Configuring inference benchmark -[2023-08-25 14:53:19,298][benchmark][INFO] - + Setting seed(42) -[2023-08-25 14:53:20,857][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-25 14:53:20,857][backend][INFO] - Configuring pytorch backend -[2023-08-25 14:53:20,857][backend][INFO] - + Checking initial device isolation -[2023-08-25 14:53:20,857][backend][INFO] - + Checking contineous device isolation -[2023-08-25 14:53:20,857][pytorch][INFO] - + Disabling gradients -[2023-08-25 14:53:20,858][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-25 14:53:21,469][pytorch][INFO] - + Turning on eval mode -[2023-08-25 14:53:21,469][inference][INFO] - Running inference benchmark -[2023-08-25 14:53:21,673][inference][INFO] - + Tracking forward pass peak memory -[2023-08-25 14:53:21,718][inference][INFO] - + Forward pass peak memory: 469.086208 (MB) -[2023-08-25 14:53:21,720][inference][INFO] - + Warming up the forward pass 
-[2023-08-25 14:53:21,757][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-25 14:53:26,800][inference][INFO] - + Forward pass latency: 3.94e-03 (s) -[2023-08-25 14:53:26,802][inference][INFO] - + Forward pass throughput: 254.00 (samples/s) -[2023-08-25 14:53:26,803][inference][INFO] - + Warming up the generation pass -[2023-08-25 14:53:27,361][inference][INFO] - + Tracking generation latency and throughput -[2023-08-25 14:53:32,655][inference][INFO] - + Generation pass latency: 5.29e-01 (s) -[2023-08-25 14:53:32,656][inference][INFO] - + Generation pass throughput: 189.00 (tokens/s) -[2023-08-25 14:53:32,656][inference][INFO] - Saving inference results -[2023-08-25 14:53:32,673][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-25_15:13:34_4b796978656e461177a83d58ec3c2b06152c63db/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-25_15:13:34_4b796978656e461177a83d58ec3c2b06152c63db/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_15:13:34_4b796978656e461177a83d58ec3c2b06152c63db/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_15:13:34_4b796978656e461177a83d58ec3c2b06152c63db/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-25_15:13:34_4b796978656e461177a83d58ec3c2b06152c63db/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 9cf6c08c696afe1f5fe61ae5601a22fa89ae1d98..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_15:13:34_4b796978656e461177a83d58ec3c2b06152c63db/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-25_15:13:34_4b796978656e461177a83d58ec3c2b06152c63db/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-25_15:13:34_4b796978656e461177a83d58ec3c2b06152c63db/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-25_15:13:34_4b796978656e461177a83d58ec3c2b06152c63db/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_15:13:34_4b796978656e461177a83d58ec3c2b06152c63db/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-25_15:13:34_4b796978656e461177a83d58ec3c2b06152c63db/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-25_15:13:34_4b796978656e461177a83d58ec3c2b06152c63db/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_15:13:34_4b796978656e461177a83d58ec3c2b06152c63db/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_15:13:34_4b796978656e461177a83d58ec3c2b06152c63db/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-25_15:13:34_4b796978656e461177a83d58ec3c2b06152c63db/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 050a53d116fe42e1ff43b5a02cd87172ad95193d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_15:13:34_4b796978656e461177a83d58ec3c2b06152c63db/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.94399999999996,0.0034,294.0 diff --git a/raw_results/2023-08-25_15:13:34_4b796978656e461177a83d58ec3c2b06152c63db/pytorch_bert_inference/0/main.log b/raw_results/2023-08-25_15:13:34_4b796978656e461177a83d58ec3c2b06152c63db/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
abbc5674fbf2770fa02b315aa56bb05c87a54039..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_15:13:34_4b796978656e461177a83d58ec3c2b06152c63db/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-25 16:50:05,704][benchmark][INFO] - Configuring inference benchmark -[2023-08-25 16:50:05,705][benchmark][INFO] - + Setting seed(42) -[2023-08-25 16:50:07,255][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-25 16:50:07,255][backend][INFO] - Configuring pytorch backend -[2023-08-25 16:50:07,255][backend][INFO] - + Checking initial device isolation -[2023-08-25 16:50:07,255][backend][INFO] - + Checking contineous device isolation -[2023-08-25 16:50:07,255][pytorch][INFO] - + Disabling gradients -[2023-08-25 16:50:07,256][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-25 16:50:08,060][pytorch][INFO] - + Turning on eval mode -[2023-08-25 16:50:08,061][inference][INFO] - Running inference benchmark -[2023-08-25 16:50:08,183][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-25 16:50:08,184][inference][INFO] - + Tracking forward pass peak memory -[2023-08-25 16:50:08,247][inference][INFO] - + Forward pass peak memory: 466.94399999999996 (MB) -[2023-08-25 16:50:08,248][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-25 16:50:08,250][inference][INFO] - + Warming up the forward pass -[2023-08-25 16:50:08,285][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-25 16:50:13,338][inference][INFO] - + Forward pass latency: 3.40e-03 (s) -[2023-08-25 16:50:13,340][inference][INFO] - + Forward pass throughput: 294.00 (samples/s) -[2023-08-25 16:50:13,340][inference][INFO] - Saving inference results -[2023-08-25 16:50:13,351][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-25_15:13:34_4b796978656e461177a83d58ec3c2b06152c63db/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-25_15:13:34_4b796978656e461177a83d58ec3c2b06152c63db/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_15:13:34_4b796978656e461177a83d58ec3c2b06152c63db/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_15:13:34_4b796978656e461177a83d58ec3c2b06152c63db/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-25_15:13:34_4b796978656e461177a83d58ec3c2b06152c63db/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 77f08f346a7c60c4cb8c16ead36507f5297cf82c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_15:13:34_4b796978656e461177a83d58ec3c2b06152c63db/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-25_15:13:34_4b796978656e461177a83d58ec3c2b06152c63db/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-25_15:13:34_4b796978656e461177a83d58ec3c2b06152c63db/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-25_15:13:34_4b796978656e461177a83d58ec3c2b06152c63db/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_15:13:34_4b796978656e461177a83d58ec3c2b06152c63db/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-25_15:13:34_4b796978656e461177a83d58ec3c2b06152c63db/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-25_15:13:34_4b796978656e461177a83d58ec3c2b06152c63db/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_15:13:34_4b796978656e461177a83d58ec3c2b06152c63db/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_15:13:34_4b796978656e461177a83d58ec3c2b06152c63db/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-25_15:13:34_4b796978656e461177a83d58ec3c2b06152c63db/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 553786c686942337442f07bb2b8e89c5745b2928..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_15:13:34_4b796978656e461177a83d58ec3c2b06152c63db/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.97209599999996,0.00359,1110.0 diff --git a/raw_results/2023-08-25_15:13:34_4b796978656e461177a83d58ec3c2b06152c63db/pytorch_bert_inference/1/main.log b/raw_results/2023-08-25_15:13:34_4b796978656e461177a83d58ec3c2b06152c63db/pytorch_bert_inference/1/main.log deleted file mode 100644 index 2ce4171e360972d5297c15ffa7f15707aebf1098..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_15:13:34_4b796978656e461177a83d58ec3c2b06152c63db/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-25 16:50:13,717][benchmark][INFO] - Configuring inference benchmark -[2023-08-25 16:50:13,718][benchmark][INFO] - + Setting seed(42) -[2023-08-25 16:50:14,153][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-25 16:50:14,154][backend][INFO] - Configuring pytorch backend -[2023-08-25 16:50:14,154][backend][INFO] - + Checking initial device isolation -[2023-08-25 16:50:14,154][backend][INFO] - + Checking contineous device isolation -[2023-08-25 16:50:14,154][pytorch][INFO] - + Disabling gradients -[2023-08-25 16:50:14,154][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-25 16:50:14,266][pytorch][INFO] - + Turning on eval mode -[2023-08-25 16:50:14,267][inference][INFO] - Running inference benchmark -[2023-08-25 16:50:14,388][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-25 16:50:14,390][inference][INFO] - + Tracking forward 
pass peak memory -[2023-08-25 16:50:14,431][inference][INFO] - + Forward pass peak memory: 467.97209599999996 (MB) -[2023-08-25 16:50:14,432][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-25 16:50:14,434][inference][INFO] - + Warming up the forward pass -[2023-08-25 16:50:14,473][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-25 16:50:19,521][inference][INFO] - + Forward pass latency: 3.59e-03 (s) -[2023-08-25 16:50:19,523][inference][INFO] - + Forward pass throughput: 1110.00 (samples/s) -[2023-08-25 16:50:19,523][inference][INFO] - Saving inference results -[2023-08-25 16:50:19,530][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-25_15:13:34_4b796978656e461177a83d58ec3c2b06152c63db/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-25_15:13:34_4b796978656e461177a83d58ec3c2b06152c63db/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_15:13:34_4b796978656e461177a83d58ec3c2b06152c63db/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_15:13:34_4b796978656e461177a83d58ec3c2b06152c63db/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-25_15:13:34_4b796978656e461177a83d58ec3c2b06152c63db/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index dc4e85e5dbea0dc587c9c3a48f87c694d77617ad..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_15:13:34_4b796978656e461177a83d58ec3c2b06152c63db/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - 
launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-25_15:13:34_4b796978656e461177a83d58ec3c2b06152c63db/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-25_15:13:34_4b796978656e461177a83d58ec3c2b06152c63db/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-25_15:13:34_4b796978656e461177a83d58ec3c2b06152c63db/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_15:13:34_4b796978656e461177a83d58ec3c2b06152c63db/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-25_15:13:34_4b796978656e461177a83d58ec3c2b06152c63db/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-25_15:13:34_4b796978656e461177a83d58ec3c2b06152c63db/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_15:13:34_4b796978656e461177a83d58ec3c2b06152c63db/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_15:13:34_4b796978656e461177a83d58ec3c2b06152c63db/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-25_15:13:34_4b796978656e461177a83d58ec3c2b06152c63db/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index b49296148d8a77330605c8d974a2975056f1030b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_15:13:34_4b796978656e461177a83d58ec3c2b06152c63db/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.45075199999997,0.00384,260.0,0.524,191.0 diff --git a/raw_results/2023-08-25_15:13:34_4b796978656e461177a83d58ec3c2b06152c63db/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-25_15:13:34_4b796978656e461177a83d58ec3c2b06152c63db/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index dc27d14b554e4dba5599b4d4abe3ace04cc39e8e..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-25_15:13:34_4b796978656e461177a83d58ec3c2b06152c63db/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-25 16:50:24,324][benchmark][INFO] - Configuring inference benchmark -[2023-08-25 16:50:24,325][benchmark][INFO] - + Setting seed(42) -[2023-08-25 16:50:25,745][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-25 16:50:25,745][backend][INFO] - Configuring pytorch backend -[2023-08-25 16:50:25,745][backend][INFO] - + Checking initial device isolation -[2023-08-25 16:50:25,746][backend][INFO] - + Checking contineous device isolation -[2023-08-25 16:50:25,746][pytorch][INFO] - + Disabling gradients -[2023-08-25 16:50:25,746][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-25 16:50:26,376][pytorch][INFO] - + Turning on eval mode -[2023-08-25 16:50:26,376][inference][INFO] - Running inference benchmark -[2023-08-25 16:50:26,574][inference][INFO] - + Tracking forward pass peak memory -[2023-08-25 16:50:26,619][inference][INFO] - + Forward pass peak memory: 469.45075199999997 (MB) -[2023-08-25 16:50:26,620][inference][INFO] - + Warming up the forward pass -[2023-08-25 16:50:26,655][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-25 16:50:31,701][inference][INFO] - + Forward pass latency: 3.84e-03 (s) -[2023-08-25 16:50:31,702][inference][INFO] - + Forward pass throughput: 260.00 (samples/s) -[2023-08-25 16:50:31,703][inference][INFO] - + Warming up the generation pass -[2023-08-25 16:50:32,291][inference][INFO] - + Tracking generation latency and throughput -[2023-08-25 16:50:37,537][inference][INFO] - + Generation pass latency: 5.24e-01 (s) -[2023-08-25 16:50:37,538][inference][INFO] - + Generation pass throughput: 191.00 (tokens/s) -[2023-08-25 16:50:37,538][inference][INFO] - Saving inference results -[2023-08-25 16:50:37,551][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-25_15:36:37_0040469bb8e718f4ffafef829e497805df1aa1fb/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-25_15:36:37_0040469bb8e718f4ffafef829e497805df1aa1fb/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_15:36:37_0040469bb8e718f4ffafef829e497805df1aa1fb/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_15:36:37_0040469bb8e718f4ffafef829e497805df1aa1fb/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-25_15:36:37_0040469bb8e718f4ffafef829e497805df1aa1fb/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index a4ff8b8f796ef59fe854cb5c222dedcf8710e31e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_15:36:37_0040469bb8e718f4ffafef829e497805df1aa1fb/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
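The sweeper params above (benchmark.input_shapes.batch_size: 1,4) are what produce the numbered job directories 0 and 1 seen throughout these paths: Hydra's basic sweeper expands each comma-separated value list into one job per combination, launched with the corresponding override. An illustrative re-implementation of that expansion (not Hydra's actual code):

from itertools import product

def expand_sweep(params: dict) -> list:
    """Expand comma-separated sweep params into one override list per job."""
    keys = list(params)
    values = [str(params[k]).split(",") for k in keys]
    return [[f"{k}={v}" for k, v in zip(keys, combo)] for combo in product(*values)]

for job_num, overrides in enumerate(expand_sweep({"benchmark.input_shapes.batch_size": "1,4"})):
    print(job_num, overrides)
# 0 ['benchmark.input_shapes.batch_size=1']  -> .../pytorch_bert_inference/0
# 1 ['benchmark.input_shapes.batch_size=4']  -> .../pytorch_bert_inference/1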
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-25_15:36:37_0040469bb8e718f4ffafef829e497805df1aa1fb/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-25_15:36:37_0040469bb8e718f4ffafef829e497805df1aa1fb/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-25_15:36:37_0040469bb8e718f4ffafef829e497805df1aa1fb/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_15:36:37_0040469bb8e718f4ffafef829e497805df1aa1fb/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-25_15:36:37_0040469bb8e718f4ffafef829e497805df1aa1fb/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-25_15:36:37_0040469bb8e718f4ffafef829e497805df1aa1fb/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_15:36:37_0040469bb8e718f4ffafef829e497805df1aa1fb/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_15:36:37_0040469bb8e718f4ffafef829e497805df1aa1fb/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-25_15:36:37_0040469bb8e718f4ffafef829e497805df1aa1fb/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 835d6ebb091d9434f85420366179908c461a8ba5..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_15:36:37_0040469bb8e718f4ffafef829e497805df1aa1fb/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.54668799999996,0.00308,325.0 diff --git a/raw_results/2023-08-25_15:36:37_0040469bb8e718f4ffafef829e497805df1aa1fb/pytorch_bert_inference/0/main.log b/raw_results/2023-08-25_15:36:37_0040469bb8e718f4ffafef829e497805df1aa1fb/pytorch_bert_inference/0/main.log deleted file mode 100644 index 779742971f83411802e997b6459bfb2962fbbd3a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_15:36:37_0040469bb8e718f4ffafef829e497805df1aa1fb/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-25 16:51:45,362][benchmark][INFO] - Configuring inference benchmark -[2023-08-25 16:51:45,363][benchmark][INFO] - + Setting seed(42) -[2023-08-25 16:51:46,629][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-25 16:51:46,629][backend][INFO] - Configuring pytorch backend -[2023-08-25 16:51:46,630][backend][INFO] - + Checking initial device isolation -[2023-08-25 16:51:46,630][backend][INFO] - + Checking contineous device isolation -[2023-08-25 16:51:46,630][pytorch][INFO] - + Disabling gradients -[2023-08-25 16:51:46,630][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-25 16:51:47,251][pytorch][INFO] - + Turning on eval mode -[2023-08-25 16:51:47,252][inference][INFO] - Running inference benchmark -[2023-08-25 16:51:47,373][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-25 16:51:47,375][inference][INFO] - + Tracking forward 
pass peak memory -[2023-08-25 16:51:47,435][inference][INFO] - + Forward pass peak memory: 466.54668799999996 (MB) -[2023-08-25 16:51:47,436][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-25 16:51:47,438][inference][INFO] - + Warming up the forward pass -[2023-08-25 16:51:47,469][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-25 16:51:52,522][inference][INFO] - + Forward pass latency: 3.08e-03 (s) -[2023-08-25 16:51:52,524][inference][INFO] - + Forward pass throughput: 325.00 (samples/s) -[2023-08-25 16:51:52,524][inference][INFO] - Saving inference results -[2023-08-25 16:51:52,535][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-25_15:36:37_0040469bb8e718f4ffafef829e497805df1aa1fb/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-25_15:36:37_0040469bb8e718f4ffafef829e497805df1aa1fb/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_15:36:37_0040469bb8e718f4ffafef829e497805df1aa1fb/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_15:36:37_0040469bb8e718f4ffafef829e497805df1aa1fb/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-25_15:36:37_0040469bb8e718f4ffafef829e497805df1aa1fb/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 859d9c64dfa7c98e5fa58d70c5415d6cdd84477f..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_15:36:37_0040469bb8e718f4ffafef829e497805df1aa1fb/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} 
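The reported figures are mutually consistent if throughput is derived from mean latency: forward throughput = batch_size / latency, and generation throughput = batch_size * new_tokens / generation latency, each rounded to three significant figures. A quick arithmetic check against numbers from the logs above (the relations are inferred from the data, not taken from the optimum-benchmark source):

from math import floor, log10

def round3(x: float) -> float:
    """Round to three significant figures, matching how the results are reported."""
    return round(x, 2 - int(floor(log10(abs(x)))))

print(round3(1 / 0.00308))   # 325.0   bert, batch_size 1: 325 samples/s
print(round3(4 / 0.00359))   # 1110.0  consistent with batch_size 4 at 3.59e-03 s
print(round3(100 / 0.524))   # 191.0   gpt2, batch_size 1 * 100 new_tokens: 191 tokens/s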
- launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-25_15:36:37_0040469bb8e718f4ffafef829e497805df1aa1fb/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-25_15:36:37_0040469bb8e718f4ffafef829e497805df1aa1fb/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-25_15:36:37_0040469bb8e718f4ffafef829e497805df1aa1fb/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_15:36:37_0040469bb8e718f4ffafef829e497805df1aa1fb/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-25_15:36:37_0040469bb8e718f4ffafef829e497805df1aa1fb/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-25_15:36:37_0040469bb8e718f4ffafef829e497805df1aa1fb/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_15:36:37_0040469bb8e718f4ffafef829e497805df1aa1fb/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_15:36:37_0040469bb8e718f4ffafef829e497805df1aa1fb/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-25_15:36:37_0040469bb8e718f4ffafef829e497805df1aa1fb/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 63c99fb780c605abdbd0155aaef86c2a40ba8e2d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_15:36:37_0040469bb8e718f4ffafef829e497805df1aa1fb/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.582976,0.00339,1180.0 diff --git a/raw_results/2023-08-25_15:36:37_0040469bb8e718f4ffafef829e497805df1aa1fb/pytorch_bert_inference/1/main.log b/raw_results/2023-08-25_15:36:37_0040469bb8e718f4ffafef829e497805df1aa1fb/pytorch_bert_inference/1/main.log deleted file mode 100644 index 
685ef1551aafd6bcb6739683ed42dc3e03ab517a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_15:36:37_0040469bb8e718f4ffafef829e497805df1aa1fb/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-25 16:51:52,912][benchmark][INFO] - Configuring inference benchmark -[2023-08-25 16:51:52,913][benchmark][INFO] - + Setting seed(42) -[2023-08-25 16:51:53,365][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-25 16:51:53,365][backend][INFO] - Configuring pytorch backend -[2023-08-25 16:51:53,365][backend][INFO] - + Checking initial device isolation -[2023-08-25 16:51:53,365][backend][INFO] - + Checking contineous device isolation -[2023-08-25 16:51:53,366][pytorch][INFO] - + Disabling gradients -[2023-08-25 16:51:53,366][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-25 16:51:53,486][pytorch][INFO] - + Turning on eval mode -[2023-08-25 16:51:53,487][inference][INFO] - Running inference benchmark -[2023-08-25 16:51:53,610][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-25 16:51:53,611][inference][INFO] - + Tracking forward pass peak memory -[2023-08-25 16:51:53,655][inference][INFO] - + Forward pass peak memory: 467.582976 (MB) -[2023-08-25 16:51:53,656][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-25 16:51:53,658][inference][INFO] - + Warming up the forward pass -[2023-08-25 16:51:53,694][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-25 16:51:58,745][inference][INFO] - + Forward pass latency: 3.39e-03 (s) -[2023-08-25 16:51:58,746][inference][INFO] - + Forward pass throughput: 1180.00 (samples/s) -[2023-08-25 16:51:58,746][inference][INFO] - Saving inference results -[2023-08-25 16:51:58,755][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-25_15:36:37_0040469bb8e718f4ffafef829e497805df1aa1fb/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-25_15:36:37_0040469bb8e718f4ffafef829e497805df1aa1fb/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_15:36:37_0040469bb8e718f4ffafef829e497805df1aa1fb/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_15:36:37_0040469bb8e718f4ffafef829e497805df1aa1fb/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-25_15:36:37_0040469bb8e718f4ffafef829e497805df1aa1fb/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 2a922e0598c678d9433695ba6a5b1ce9e93e4a21..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_15:36:37_0040469bb8e718f4ffafef829e497805df1aa1fb/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
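The job_logging block repeated in these hydra.yaml files is a standard Python logging dictConfig. A trimmed, dependency-free equivalent (with the colorlog console formatter swapped for the plain "simple" one) that yields main.log lines in the "[time][name][level] - message" shape shown above:

import logging
import logging.config

LOGGING = {
    "version": 1,
    "disable_existing_loggers": False,
    "formatters": {
        "simple": {"format": "[%(asctime)s][%(name)s][%(levelname)s] - %(message)s"},
    },
    "handlers": {
        "console": {"class": "logging.StreamHandler",
                    "formatter": "simple", "stream": "ext://sys.stdout"},
        "file": {"class": "logging.FileHandler",
                 "formatter": "simple", "filename": "main.log"},
    },
    "root": {"level": "INFO", "handlers": ["console", "file"]},
}
logging.config.dictConfig(LOGGING)
logging.getLogger("inference").info("Running inference benchmark")
# -> [<asctime>][inference][INFO] - Running inference benchmark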
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-25_15:36:37_0040469bb8e718f4ffafef829e497805df1aa1fb/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-25_15:36:37_0040469bb8e718f4ffafef829e497805df1aa1fb/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-25_15:36:37_0040469bb8e718f4ffafef829e497805df1aa1fb/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_15:36:37_0040469bb8e718f4ffafef829e497805df1aa1fb/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-25_15:36:37_0040469bb8e718f4ffafef829e497805df1aa1fb/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-25_15:36:37_0040469bb8e718f4ffafef829e497805df1aa1fb/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_15:36:37_0040469bb8e718f4ffafef829e497805df1aa1fb/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_15:36:37_0040469bb8e718f4ffafef829e497805df1aa1fb/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-25_15:36:37_0040469bb8e718f4ffafef829e497805df1aa1fb/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index ca146dc3ef38dcf38d7342d2879faefa5ff08375..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_15:36:37_0040469bb8e718f4ffafef829e497805df1aa1fb/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.069824,0.00385,260.0,0.488,205.0 diff --git a/raw_results/2023-08-25_15:36:37_0040469bb8e718f4ffafef829e497805df1aa1fb/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-25_15:36:37_0040469bb8e718f4ffafef829e497805df1aa1fb/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 36eceafcd4e9294151981ed8d20b5e297fb31184..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_15:36:37_0040469bb8e718f4ffafef829e497805df1aa1fb/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-25 16:52:03,461][benchmark][INFO] - Configuring inference benchmark -[2023-08-25 16:52:03,461][benchmark][INFO] - + Setting seed(42) -[2023-08-25 16:52:04,889][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-25 16:52:04,889][backend][INFO] - Configuring pytorch backend -[2023-08-25 16:52:04,890][backend][INFO] - + Checking initial device isolation -[2023-08-25 16:52:04,890][backend][INFO] - + Checking contineous device isolation -[2023-08-25 16:52:04,890][pytorch][INFO] - + Disabling gradients -[2023-08-25 16:52:04,890][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-25 16:52:05,556][pytorch][INFO] - + Turning on eval mode -[2023-08-25 16:52:05,557][inference][INFO] - Running inference benchmark -[2023-08-25 16:52:05,961][inference][INFO] - + Tracking forward pass peak memory -[2023-08-25 16:52:06,010][inference][INFO] - + Forward pass peak memory: 469.069824 (MB) -[2023-08-25 16:52:06,012][inference][INFO] - + Warming up the forward pass 
-[2023-08-25 16:52:06,046][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-25 16:52:11,088][inference][INFO] - + Forward pass latency: 3.85e-03 (s) -[2023-08-25 16:52:11,090][inference][INFO] - + Forward pass throughput: 260.00 (samples/s) -[2023-08-25 16:52:11,091][inference][INFO] - + Warming up the generation pass -[2023-08-25 16:52:11,656][inference][INFO] - + Tracking generation latency and throughput -[2023-08-25 16:52:17,029][inference][INFO] - + Generation pass latency: 4.88e-01 (s) -[2023-08-25 16:52:17,031][inference][INFO] - + Generation pass throughput: 205.00 (tokens/s) -[2023-08-25 16:52:17,031][inference][INFO] - Saving inference results -[2023-08-25 16:52:17,043][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-25_15:46:56_74081cb5fa52540bbdde620942bd3a657af85c8e/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-25_15:46:56_74081cb5fa52540bbdde620942bd3a657af85c8e/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_15:46:56_74081cb5fa52540bbdde620942bd3a657af85c8e/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_15:46:56_74081cb5fa52540bbdde620942bd3a657af85c8e/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-25_15:46:56_74081cb5fa52540bbdde620942bd3a657af85c8e/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 1e98a88929afe1e28c1e9d287069efdfc1723a4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_15:46:56_74081cb5fa52540bbdde620942bd3a657af85c8e/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-25_15:46:56_74081cb5fa52540bbdde620942bd3a657af85c8e/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-25_15:46:56_74081cb5fa52540bbdde620942bd3a657af85c8e/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-25_15:46:56_74081cb5fa52540bbdde620942bd3a657af85c8e/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_15:46:56_74081cb5fa52540bbdde620942bd3a657af85c8e/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-25_15:46:56_74081cb5fa52540bbdde620942bd3a657af85c8e/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-25_15:46:56_74081cb5fa52540bbdde620942bd3a657af85c8e/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_15:46:56_74081cb5fa52540bbdde620942bd3a657af85c8e/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_15:46:56_74081cb5fa52540bbdde620942bd3a657af85c8e/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-25_15:46:56_74081cb5fa52540bbdde620942bd3a657af85c8e/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 1eb72163d4705e7bc5fb26047ce455647a88d1cd..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_15:46:56_74081cb5fa52540bbdde620942bd3a657af85c8e/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.43904,0.00305,328.0 diff --git a/raw_results/2023-08-25_15:46:56_74081cb5fa52540bbdde620942bd3a657af85c8e/pytorch_bert_inference/0/main.log b/raw_results/2023-08-25_15:46:56_74081cb5fa52540bbdde620942bd3a657af85c8e/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
90f6626ac82ee71bccc42c7bb8602a6c8690c81e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_15:46:56_74081cb5fa52540bbdde620942bd3a657af85c8e/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-25 16:53:23,442][benchmark][INFO] - Configuring inference benchmark -[2023-08-25 16:53:23,443][benchmark][INFO] - + Setting seed(42) -[2023-08-25 16:53:24,698][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-25 16:53:24,699][backend][INFO] - Configuring pytorch backend -[2023-08-25 16:53:24,699][backend][INFO] - + Checking initial device isolation -[2023-08-25 16:53:24,699][backend][INFO] - + Checking contineous device isolation -[2023-08-25 16:53:24,699][pytorch][INFO] - + Disabling gradients -[2023-08-25 16:53:24,699][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-25 16:53:25,383][pytorch][INFO] - + Turning on eval mode -[2023-08-25 16:53:25,384][inference][INFO] - Running inference benchmark -[2023-08-25 16:53:25,522][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-25 16:53:25,523][inference][INFO] - + Tracking forward pass peak memory -[2023-08-25 16:53:25,585][inference][INFO] - + Forward pass peak memory: 468.43904 (MB) -[2023-08-25 16:53:25,586][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-25 16:53:25,588][inference][INFO] - + Warming up the forward pass -[2023-08-25 16:53:25,623][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-25 16:53:30,675][inference][INFO] - + Forward pass latency: 3.05e-03 (s) -[2023-08-25 16:53:30,677][inference][INFO] - + Forward pass throughput: 328.00 (samples/s) -[2023-08-25 16:53:30,677][inference][INFO] - Saving inference results -[2023-08-25 16:53:30,688][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-25_15:46:56_74081cb5fa52540bbdde620942bd3a657af85c8e/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-25_15:46:56_74081cb5fa52540bbdde620942bd3a657af85c8e/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_15:46:56_74081cb5fa52540bbdde620942bd3a657af85c8e/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_15:46:56_74081cb5fa52540bbdde620942bd3a657af85c8e/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-25_15:46:56_74081cb5fa52540bbdde620942bd3a657af85c8e/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index e2cb2f00747a5a87d0173c5db95682c6873eeaed..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_15:46:56_74081cb5fa52540bbdde620942bd3a657af85c8e/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-25_15:46:56_74081cb5fa52540bbdde620942bd3a657af85c8e/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-25_15:46:56_74081cb5fa52540bbdde620942bd3a657af85c8e/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-25_15:46:56_74081cb5fa52540bbdde620942bd3a657af85c8e/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_15:46:56_74081cb5fa52540bbdde620942bd3a657af85c8e/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-25_15:46:56_74081cb5fa52540bbdde620942bd3a657af85c8e/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-25_15:46:56_74081cb5fa52540bbdde620942bd3a657af85c8e/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_15:46:56_74081cb5fa52540bbdde620942bd3a657af85c8e/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_15:46:56_74081cb5fa52540bbdde620942bd3a657af85c8e/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-25_15:46:56_74081cb5fa52540bbdde620942bd3a657af85c8e/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 5531b9c38a3c564a2552aaa11437b11335773064..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_15:46:56_74081cb5fa52540bbdde620942bd3a657af85c8e/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,469.51219199999997,0.00344,1160.0 diff --git a/raw_results/2023-08-25_15:46:56_74081cb5fa52540bbdde620942bd3a657af85c8e/pytorch_bert_inference/1/main.log b/raw_results/2023-08-25_15:46:56_74081cb5fa52540bbdde620942bd3a657af85c8e/pytorch_bert_inference/1/main.log deleted file mode 100644 index 13796b0fc18afc65456d287e34f960ea39465a82..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_15:46:56_74081cb5fa52540bbdde620942bd3a657af85c8e/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-25 16:53:31,084][benchmark][INFO] - Configuring inference benchmark -[2023-08-25 16:53:31,085][benchmark][INFO] - + Setting seed(42) -[2023-08-25 16:53:31,541][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-25 16:53:31,541][backend][INFO] - Configuring pytorch backend -[2023-08-25 16:53:31,541][backend][INFO] - + Checking initial device isolation -[2023-08-25 16:53:31,542][backend][INFO] - + Checking contineous device isolation -[2023-08-25 16:53:31,542][pytorch][INFO] - + Disabling gradients -[2023-08-25 16:53:31,542][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-25 16:53:31,665][pytorch][INFO] - + Turning on eval mode -[2023-08-25 16:53:31,666][inference][INFO] - Running inference benchmark -[2023-08-25 16:53:31,797][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-25 16:53:31,799][inference][INFO] - + Tracking forward 
pass peak memory -[2023-08-25 16:53:31,844][inference][INFO] - + Forward pass peak memory: 469.51219199999997 (MB) -[2023-08-25 16:53:31,845][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-25 16:53:31,847][inference][INFO] - + Warming up the forward pass -[2023-08-25 16:53:31,898][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-25 16:53:36,947][inference][INFO] - + Forward pass latency: 3.44e-03 (s) -[2023-08-25 16:53:36,948][inference][INFO] - + Forward pass throughput: 1160.00 (samples/s) -[2023-08-25 16:53:36,948][inference][INFO] - Saving inference results -[2023-08-25 16:53:36,957][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-25_15:46:56_74081cb5fa52540bbdde620942bd3a657af85c8e/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-25_15:46:56_74081cb5fa52540bbdde620942bd3a657af85c8e/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_15:46:56_74081cb5fa52540bbdde620942bd3a657af85c8e/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_15:46:56_74081cb5fa52540bbdde620942bd3a657af85c8e/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-25_15:46:56_74081cb5fa52540bbdde620942bd3a657af85c8e/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 45cfad8fef353a63dd199520edc92a37d8c84704..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_15:46:56_74081cb5fa52540bbdde620942bd3a657af85c8e/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - 
launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-25_15:46:56_74081cb5fa52540bbdde620942bd3a657af85c8e/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-25_15:46:56_74081cb5fa52540bbdde620942bd3a657af85c8e/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-25_15:46:56_74081cb5fa52540bbdde620942bd3a657af85c8e/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_15:46:56_74081cb5fa52540bbdde620942bd3a657af85c8e/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-25_15:46:56_74081cb5fa52540bbdde620942bd3a657af85c8e/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-25_15:46:56_74081cb5fa52540bbdde620942bd3a657af85c8e/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_15:46:56_74081cb5fa52540bbdde620942bd3a657af85c8e/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_15:46:56_74081cb5fa52540bbdde620942bd3a657af85c8e/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-25_15:46:56_74081cb5fa52540bbdde620942bd3a657af85c8e/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 989e453b0bb9bc80e0db4bfb00374aa40ae62f19..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_15:46:56_74081cb5fa52540bbdde620942bd3a657af85c8e/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.2992,0.00391,256.0,0.515,194.0 diff --git a/raw_results/2023-08-25_15:46:56_74081cb5fa52540bbdde620942bd3a657af85c8e/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-25_15:46:56_74081cb5fa52540bbdde620942bd3a657af85c8e/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index b521c13da5d45c93c0b8e8e4288af4fbc7b8cd57..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-25_15:46:56_74081cb5fa52540bbdde620942bd3a657af85c8e/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-25 16:53:41,645][benchmark][INFO] - Configuring inference benchmark -[2023-08-25 16:53:41,646][benchmark][INFO] - + Setting seed(42) -[2023-08-25 16:53:43,061][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-25 16:53:43,061][backend][INFO] - Configuring pytorch backend -[2023-08-25 16:53:43,062][backend][INFO] - + Checking initial device isolation -[2023-08-25 16:53:43,062][backend][INFO] - + Checking contineous device isolation -[2023-08-25 16:53:43,062][pytorch][INFO] - + Disabling gradients -[2023-08-25 16:53:43,062][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-25 16:53:43,728][pytorch][INFO] - + Turning on eval mode -[2023-08-25 16:53:43,728][inference][INFO] - Running inference benchmark -[2023-08-25 16:53:44,077][inference][INFO] - + Tracking forward pass peak memory -[2023-08-25 16:53:44,129][inference][INFO] - + Forward pass peak memory: 469.2992 (MB) -[2023-08-25 16:53:44,131][inference][INFO] - + Warming up the forward pass -[2023-08-25 16:53:44,170][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-25 16:53:49,214][inference][INFO] - + Forward pass latency: 3.91e-03 (s) -[2023-08-25 16:53:49,216][inference][INFO] - + Forward pass throughput: 256.00 (samples/s) -[2023-08-25 16:53:49,217][inference][INFO] - + Warming up the generation pass -[2023-08-25 16:53:49,809][inference][INFO] - + Tracking generation latency and throughput -[2023-08-25 16:53:54,956][inference][INFO] - + Generation pass latency: 5.15e-01 (s) -[2023-08-25 16:53:54,958][inference][INFO] - + Generation pass throughput: 194.00 (tokens/s) -[2023-08-25 16:53:54,958][inference][INFO] - Saving inference results -[2023-08-25 16:53:54,971][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-25_16:57:40_015f8e110d270a0ad42de4ae5b98198d69eb1964/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-25_16:57:40_015f8e110d270a0ad42de4ae5b98198d69eb1964/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_16:57:40_015f8e110d270a0ad42de4ae5b98198d69eb1964/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 
16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_16:57:40_015f8e110d270a0ad42de4ae5b98198d69eb1964/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-25_16:57:40_015f8e110d270a0ad42de4ae5b98198d69eb1964/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 7f1f04576c8379e99856d0ed0d1a4bd53236871e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_16:57:40_015f8e110d270a0ad42de4ae5b98198d69eb1964/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-25_16:57:40_015f8e110d270a0ad42de4ae5b98198d69eb1964/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-25_16:57:40_015f8e110d270a0ad42de4ae5b98198d69eb1964/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-25_16:57:40_015f8e110d270a0ad42de4ae5b98198d69eb1964/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_16:57:40_015f8e110d270a0ad42de4ae5b98198d69eb1964/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-25_16:57:40_015f8e110d270a0ad42de4ae5b98198d69eb1964/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-25_16:57:40_015f8e110d270a0ad42de4ae5b98198d69eb1964/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_16:57:40_015f8e110d270a0ad42de4ae5b98198d69eb1964/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_16:57:40_015f8e110d270a0ad42de4ae5b98198d69eb1964/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-25_16:57:40_015f8e110d270a0ad42de4ae5b98198d69eb1964/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 0245f7ae6ebd9401a0337c0de605c20205e1f122..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_16:57:40_015f8e110d270a0ad42de4ae5b98198d69eb1964/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.18976,0.00313,319.0 diff --git a/raw_results/2023-08-25_16:57:40_015f8e110d270a0ad42de4ae5b98198d69eb1964/pytorch_bert_inference/0/main.log b/raw_results/2023-08-25_16:57:40_015f8e110d270a0ad42de4ae5b98198d69eb1964/pytorch_bert_inference/0/main.log deleted file mode 100644 index ef3d27b5858ef26f01ae59d35318557abfc085da..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_16:57:40_015f8e110d270a0ad42de4ae5b98198d69eb1964/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-25 18:49:58,002][benchmark][INFO] - Configuring inference benchmark -[2023-08-25 18:49:58,003][benchmark][INFO] - + Setting seed(42) -[2023-08-25 18:49:59,250][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-25 18:49:59,250][backend][INFO] - Configuring pytorch backend -[2023-08-25 18:49:59,250][backend][INFO] - + Checking initial device isolation -[2023-08-25 18:49:59,250][backend][INFO] - + Checking contineous device isolation -[2023-08-25 18:49:59,251][pytorch][INFO] - + Disabling gradients -[2023-08-25 18:49:59,251][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-25 18:49:59,883][pytorch][INFO] - + Turning on eval mode -[2023-08-25 18:49:59,884][inference][INFO] - Running inference benchmark -[2023-08-25 18:50:00,010][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-25 18:50:00,012][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-25 18:50:00,071][inference][INFO] - + Forward pass peak memory: 467.18976 (MB) -[2023-08-25 18:50:00,072][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-25 18:50:00,074][inference][INFO] - + Warming up the forward pass -[2023-08-25 18:50:00,110][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-25 18:50:05,161][inference][INFO] - + Forward pass latency: 3.13e-03 (s) -[2023-08-25 18:50:05,163][inference][INFO] - + Forward pass throughput: 319.00 (samples/s) -[2023-08-25 18:50:05,163][inference][INFO] - Saving inference results -[2023-08-25 18:50:05,175][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-25_16:57:40_015f8e110d270a0ad42de4ae5b98198d69eb1964/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-25_16:57:40_015f8e110d270a0ad42de4ae5b98198d69eb1964/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_16:57:40_015f8e110d270a0ad42de4ae5b98198d69eb1964/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_16:57:40_015f8e110d270a0ad42de4ae5b98198d69eb1964/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-25_16:57:40_015f8e110d270a0ad42de4ae5b98198d69eb1964/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 747e5738bd77b10a7dee47948fa488e23590c9bb..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_16:57:40_015f8e110d270a0ad42de4ae5b98198d69eb1964/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-25_16:57:40_015f8e110d270a0ad42de4ae5b98198d69eb1964/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-25_16:57:40_015f8e110d270a0ad42de4ae5b98198d69eb1964/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-08-25_16:57:40_015f8e110d270a0ad42de4ae5b98198d69eb1964/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_16:57:40_015f8e110d270a0ad42de4ae5b98198d69eb1964/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-25_16:57:40_015f8e110d270a0ad42de4ae5b98198d69eb1964/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-25_16:57:40_015f8e110d270a0ad42de4ae5b98198d69eb1964/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_16:57:40_015f8e110d270a0ad42de4ae5b98198d69eb1964/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_16:57:40_015f8e110d270a0ad42de4ae5b98198d69eb1964/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-25_16:57:40_015f8e110d270a0ad42de4ae5b98198d69eb1964/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index d52670ca90de69cff076fac99f26e05a09482c42..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_16:57:40_015f8e110d270a0ad42de4ae5b98198d69eb1964/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.23424,0.00347,1150.0 diff --git a/raw_results/2023-08-25_16:57:40_015f8e110d270a0ad42de4ae5b98198d69eb1964/pytorch_bert_inference/1/main.log b/raw_results/2023-08-25_16:57:40_015f8e110d270a0ad42de4ae5b98198d69eb1964/pytorch_bert_inference/1/main.log deleted file mode 100644 index cf41eb1a1bf938a6849670249ab0782b1d0d5a41..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-25_16:57:40_015f8e110d270a0ad42de4ae5b98198d69eb1964/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-25 18:50:05,565][benchmark][INFO] - Configuring inference benchmark -[2023-08-25 18:50:05,566][benchmark][INFO] - + Setting seed(42) -[2023-08-25 18:50:06,018][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-25 18:50:06,018][backend][INFO] - Configuring pytorch backend -[2023-08-25 18:50:06,018][backend][INFO] - + Checking initial device isolation -[2023-08-25 18:50:06,019][backend][INFO] - + Checking contineous device isolation -[2023-08-25 18:50:06,019][pytorch][INFO] - + Disabling gradients -[2023-08-25 18:50:06,019][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-25 18:50:06,137][pytorch][INFO] - + Turning on eval mode -[2023-08-25 18:50:06,138][inference][INFO] - Running inference benchmark -[2023-08-25 18:50:06,271][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-25 18:50:06,272][inference][INFO] - + Tracking forward pass peak memory -[2023-08-25 18:50:06,315][inference][INFO] - + Forward pass peak memory: 468.23424 (MB) -[2023-08-25 18:50:06,315][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-25 18:50:06,317][inference][INFO] - + Warming up the forward pass -[2023-08-25 18:50:06,354][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-25 18:50:11,399][inference][INFO] - + Forward pass latency: 3.47e-03 (s) -[2023-08-25 18:50:11,400][inference][INFO] - + Forward pass throughput: 1150.00 (samples/s) -[2023-08-25 18:50:11,400][inference][INFO] - Saving inference results -[2023-08-25 18:50:11,408][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-25_16:57:40_015f8e110d270a0ad42de4ae5b98198d69eb1964/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-25_16:57:40_015f8e110d270a0ad42de4ae5b98198d69eb1964/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_16:57:40_015f8e110d270a0ad42de4ae5b98198d69eb1964/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: 
hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_16:57:40_015f8e110d270a0ad42de4ae5b98198d69eb1964/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-25_16:57:40_015f8e110d270a0ad42de4ae5b98198d69eb1964/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 2b45f993174d9d0b1c2a048e2db376735a5d431f..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_16:57:40_015f8e110d270a0ad42de4ae5b98198d69eb1964/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-25_16:57:40_015f8e110d270a0ad42de4ae5b98198d69eb1964/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-25_16:57:40_015f8e110d270a0ad42de4ae5b98198d69eb1964/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-25_16:57:40_015f8e110d270a0ad42de4ae5b98198d69eb1964/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_16:57:40_015f8e110d270a0ad42de4ae5b98198d69eb1964/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-25_16:57:40_015f8e110d270a0ad42de4ae5b98198d69eb1964/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-25_16:57:40_015f8e110d270a0ad42de4ae5b98198d69eb1964/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_16:57:40_015f8e110d270a0ad42de4ae5b98198d69eb1964/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_16:57:40_015f8e110d270a0ad42de4ae5b98198d69eb1964/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-25_16:57:40_015f8e110d270a0ad42de4ae5b98198d69eb1964/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 2985453a1dddf56d2c009922df00403a8abdbade..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_16:57:40_015f8e110d270a0ad42de4ae5b98198d69eb1964/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.05343999999997,0.0033,303.0,0.483,207.0 diff --git a/raw_results/2023-08-25_16:57:40_015f8e110d270a0ad42de4ae5b98198d69eb1964/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-25_16:57:40_015f8e110d270a0ad42de4ae5b98198d69eb1964/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 71ad357dec69c478714b9eef5f7f52260ee3a6f9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_16:57:40_015f8e110d270a0ad42de4ae5b98198d69eb1964/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-25 18:50:16,342][benchmark][INFO] - Configuring inference benchmark -[2023-08-25 18:50:16,343][benchmark][INFO] - + Setting seed(42) -[2023-08-25 18:50:17,744][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-25 18:50:17,744][backend][INFO] - Configuring pytorch backend -[2023-08-25 18:50:17,745][backend][INFO] - + Checking initial device isolation -[2023-08-25 18:50:17,745][backend][INFO] - + Checking contineous device isolation -[2023-08-25 18:50:17,745][pytorch][INFO] - + Disabling gradients -[2023-08-25 18:50:17,745][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-25 18:50:18,368][pytorch][INFO] - + Turning on eval mode -[2023-08-25 18:50:18,368][inference][INFO] - Running inference benchmark -[2023-08-25 18:50:18,561][inference][INFO] - + Tracking forward pass peak memory -[2023-08-25 18:50:18,608][inference][INFO] - + Forward pass peak memory: 469.05343999999997 (MB) -[2023-08-25 18:50:18,610][inference][INFO] - + Warming up the 
forward pass -[2023-08-25 18:50:18,641][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-25 18:50:23,688][inference][INFO] - + Forward pass latency: 3.30e-03 (s) -[2023-08-25 18:50:23,690][inference][INFO] - + Forward pass throughput: 303.00 (samples/s) -[2023-08-25 18:50:23,691][inference][INFO] - + Warming up the generation pass -[2023-08-25 18:50:24,181][inference][INFO] - + Tracking generation latency and throughput -[2023-08-25 18:50:29,500][inference][INFO] - + Generation pass latency: 4.83e-01 (s) -[2023-08-25 18:50:29,502][inference][INFO] - + Generation pass throughput: 207.00 (tokens/s) -[2023-08-25 18:50:29,502][inference][INFO] - Saving inference results -[2023-08-25 18:50:29,513][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-25_17:59:29_960807f62e53676723ab8281019219864ef3db4d/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-25_17:59:29_960807f62e53676723ab8281019219864ef3db4d/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_17:59:29_960807f62e53676723ab8281019219864ef3db4d/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_17:59:29_960807f62e53676723ab8281019219864ef3db4d/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-25_17:59:29_960807f62e53676723ab8281019219864ef3db4d/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index f354e1e93c8d9598ac37ceeb20a76c5cf6a8eab3..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_17:59:29_960807f62e53676723ab8281019219864ef3db4d/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-25_17:59:29_960807f62e53676723ab8281019219864ef3db4d/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-25_17:59:29_960807f62e53676723ab8281019219864ef3db4d/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-25_17:59:29_960807f62e53676723ab8281019219864ef3db4d/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_17:59:29_960807f62e53676723ab8281019219864ef3db4d/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-25_17:59:29_960807f62e53676723ab8281019219864ef3db4d/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-25_17:59:29_960807f62e53676723ab8281019219864ef3db4d/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_17:59:29_960807f62e53676723ab8281019219864ef3db4d/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_17:59:29_960807f62e53676723ab8281019219864ef3db4d/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-25_17:59:29_960807f62e53676723ab8281019219864ef3db4d/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 3b77b8852ec4354f7740d1a6ad77aaf5529d3f73..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_17:59:29_960807f62e53676723ab8281019219864ef3db4d/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.042304,0.00372,269.0 diff --git a/raw_results/2023-08-25_17:59:29_960807f62e53676723ab8281019219864ef3db4d/pytorch_bert_inference/0/main.log b/raw_results/2023-08-25_17:59:29_960807f62e53676723ab8281019219864ef3db4d/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
56f093811993e7435f667ec4fdee954161f92832..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_17:59:29_960807f62e53676723ab8281019219864ef3db4d/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-25 18:51:38,537][benchmark][INFO] - Configuring inference benchmark -[2023-08-25 18:51:38,539][benchmark][INFO] - + Setting seed(42) -[2023-08-25 18:51:39,905][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-25 18:51:39,906][backend][INFO] - Configuring pytorch backend -[2023-08-25 18:51:39,906][backend][INFO] - + Checking initial device isolation -[2023-08-25 18:51:39,906][backend][INFO] - + Checking contineous device isolation -[2023-08-25 18:51:39,906][pytorch][INFO] - + Disabling gradients -[2023-08-25 18:51:39,907][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-25 18:51:40,540][pytorch][INFO] - + Turning on eval mode -[2023-08-25 18:51:40,540][inference][INFO] - Running inference benchmark -[2023-08-25 18:51:40,658][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-25 18:51:40,659][inference][INFO] - + Tracking forward pass peak memory -[2023-08-25 18:51:40,721][inference][INFO] - + Forward pass peak memory: 467.042304 (MB) -[2023-08-25 18:51:40,722][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-25 18:51:40,724][inference][INFO] - + Warming up the forward pass -[2023-08-25 18:51:40,756][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-25 18:51:45,802][inference][INFO] - + Forward pass latency: 3.72e-03 (s) -[2023-08-25 18:51:45,804][inference][INFO] - + Forward pass throughput: 269.00 (samples/s) -[2023-08-25 18:51:45,804][inference][INFO] - Saving inference results -[2023-08-25 18:51:45,814][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-25_17:59:29_960807f62e53676723ab8281019219864ef3db4d/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-25_17:59:29_960807f62e53676723ab8281019219864ef3db4d/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_17:59:29_960807f62e53676723ab8281019219864ef3db4d/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_17:59:29_960807f62e53676723ab8281019219864ef3db4d/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-25_17:59:29_960807f62e53676723ab8281019219864ef3db4d/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 3491527ae4be373cb9bb7854ad314ec6f5066e01..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_17:59:29_960807f62e53676723ab8281019219864ef3db4d/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
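
The hydra.yaml above records a MULTIRUN job whose basic sweeper expands the override list "benchmark.input_shapes.batch_size: 1,4" into one job per value, numbered 0 and 1 — which is why each pytorch_bert_inference experiment in this diff has subdirectories 0 (batch_size=1) and 1 (batch_size=4). A minimal sketch of that expansion, assuming the comma-separated-values semantics shown in the config (this is an illustration, not Hydra's actual BasicSweeper implementation):

    # Expand comma-separated sweep params into numbered jobs, mimicking the
    # behavior recorded in the hydra.yaml above (job ids 0 and 1).
    from itertools import product

    def expand_sweep(params: dict) -> list[dict]:
        keys = list(params)
        value_lists = [str(params[k]).split(",") for k in keys]
        return [dict(zip(keys, combo)) for combo in product(*value_lists)]

    jobs = expand_sweep({"benchmark.input_shapes.batch_size": "1,4"})
    for num, overrides in enumerate(jobs):
        print(num, overrides)
    # 0 {'benchmark.input_shapes.batch_size': '1'}
    # 1 {'benchmark.input_shapes.batch_size': '4'}
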
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-25_17:59:29_960807f62e53676723ab8281019219864ef3db4d/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-25_17:59:29_960807f62e53676723ab8281019219864ef3db4d/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-25_17:59:29_960807f62e53676723ab8281019219864ef3db4d/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_17:59:29_960807f62e53676723ab8281019219864ef3db4d/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-25_17:59:29_960807f62e53676723ab8281019219864ef3db4d/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-25_17:59:29_960807f62e53676723ab8281019219864ef3db4d/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_17:59:29_960807f62e53676723ab8281019219864ef3db4d/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_17:59:29_960807f62e53676723ab8281019219864ef3db4d/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-25_17:59:29_960807f62e53676723ab8281019219864ef3db4d/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 513b8d6a23ead35938c6f06e440b4934e1a11d10..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_17:59:29_960807f62e53676723ab8281019219864ef3db4d/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.92704,0.0036,1110.0 diff --git a/raw_results/2023-08-25_17:59:29_960807f62e53676723ab8281019219864ef3db4d/pytorch_bert_inference/1/main.log b/raw_results/2023-08-25_17:59:29_960807f62e53676723ab8281019219864ef3db4d/pytorch_bert_inference/1/main.log deleted file mode 100644 index d6a4162bcc32abe44f46d660e95b584e5ec0de12..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_17:59:29_960807f62e53676723ab8281019219864ef3db4d/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-25 18:51:46,181][benchmark][INFO] - Configuring inference benchmark -[2023-08-25 18:51:46,182][benchmark][INFO] - + Setting seed(42) -[2023-08-25 18:51:47,298][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-25 18:51:47,299][backend][INFO] - Configuring pytorch backend -[2023-08-25 18:51:47,299][backend][INFO] - + Checking initial device isolation -[2023-08-25 18:51:47,299][backend][INFO] - + Checking contineous device isolation -[2023-08-25 18:51:47,299][pytorch][INFO] - + Disabling gradients -[2023-08-25 18:51:47,299][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-25 18:51:47,425][pytorch][INFO] - + Turning on eval mode -[2023-08-25 18:51:47,425][inference][INFO] - Running inference benchmark -[2023-08-25 18:51:47,558][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-25 18:51:47,559][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-25 18:51:47,603][inference][INFO] - + Forward pass peak memory: 467.92704 (MB) -[2023-08-25 18:51:47,604][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-25 18:51:47,606][inference][INFO] - + Warming up the forward pass -[2023-08-25 18:51:47,649][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-25 18:51:52,693][inference][INFO] - + Forward pass latency: 3.60e-03 (s) -[2023-08-25 18:51:52,694][inference][INFO] - + Forward pass throughput: 1110.00 (samples/s) -[2023-08-25 18:51:52,694][inference][INFO] - Saving inference results -[2023-08-25 18:51:52,703][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-25_17:59:29_960807f62e53676723ab8281019219864ef3db4d/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-25_17:59:29_960807f62e53676723ab8281019219864ef3db4d/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_17:59:29_960807f62e53676723ab8281019219864ef3db4d/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_17:59:29_960807f62e53676723ab8281019219864ef3db4d/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-25_17:59:29_960807f62e53676723ab8281019219864ef3db4d/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index f4cfddab1d818add5843199609932efef278133e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_17:59:29_960807f62e53676723ab8281019219864ef3db4d/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-25_17:59:29_960807f62e53676723ab8281019219864ef3db4d/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-25_17:59:29_960807f62e53676723ab8281019219864ef3db4d/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-25_17:59:29_960807f62e53676723ab8281019219864ef3db4d/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_17:59:29_960807f62e53676723ab8281019219864ef3db4d/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-25_17:59:29_960807f62e53676723ab8281019219864ef3db4d/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-25_17:59:29_960807f62e53676723ab8281019219864ef3db4d/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_17:59:29_960807f62e53676723ab8281019219864ef3db4d/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-25_17:59:29_960807f62e53676723ab8281019219864ef3db4d/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-25_17:59:29_960807f62e53676723ab8281019219864ef3db4d/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 78d77d0ffc8cfb426a7bff06da46e6470e5e2735..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-25_17:59:29_960807f62e53676723ab8281019219864ef3db4d/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.028864,0.00381,262.0,0.486,206.0 diff --git a/raw_results/2023-08-25_17:59:29_960807f62e53676723ab8281019219864ef3db4d/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-25_17:59:29_960807f62e53676723ab8281019219864ef3db4d/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 5a820c37461adf327a8483b868c570d4368e6ec1..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-25_17:59:29_960807f62e53676723ab8281019219864ef3db4d/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-25 18:51:57,429][benchmark][INFO] - Configuring inference benchmark -[2023-08-25 18:51:57,430][benchmark][INFO] - + Setting seed(42) -[2023-08-25 18:51:58,910][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-25 18:51:58,910][backend][INFO] - Configuring pytorch backend -[2023-08-25 18:51:58,910][backend][INFO] - + Checking initial device isolation -[2023-08-25 18:51:58,911][backend][INFO] - + Checking contineous device isolation -[2023-08-25 18:51:58,911][pytorch][INFO] - + Disabling gradients -[2023-08-25 18:51:58,911][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-25 18:51:59,559][pytorch][INFO] - + Turning on eval mode -[2023-08-25 18:51:59,560][inference][INFO] - Running inference benchmark -[2023-08-25 18:51:59,753][inference][INFO] - + Tracking forward pass peak memory -[2023-08-25 18:51:59,804][inference][INFO] - + Forward pass peak memory: 469.028864 (MB) -[2023-08-25 18:51:59,806][inference][INFO] - + Warming up the forward pass -[2023-08-25 18:51:59,843][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-25 18:52:04,888][inference][INFO] - + Forward pass latency: 3.81e-03 (s) -[2023-08-25 18:52:04,890][inference][INFO] - + Forward pass throughput: 262.00 (samples/s) -[2023-08-25 18:52:04,891][inference][INFO] - + Warming up the generation pass -[2023-08-25 18:52:05,474][inference][INFO] - + Tracking generation latency and throughput -[2023-08-25 18:52:10,824][inference][INFO] - + Generation pass latency: 4.86e-01 (s) -[2023-08-25 18:52:10,825][inference][INFO] - + Generation pass throughput: 206.00 (tokens/s) -[2023-08-25 18:52:10,825][inference][INFO] - Saving inference results -[2023-08-25 18:52:10,837][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-26_19:08:47_686c68f64c9d0181bd54d4d2e2446543c3eca1fa/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-26_19:08:47_686c68f64c9d0181bd54d4d2e2446543c3eca1fa/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-26_19:08:47_686c68f64c9d0181bd54d4d2e2446543c3eca1fa/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-26_19:08:47_686c68f64c9d0181bd54d4d2e2446543c3eca1fa/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-26_19:08:47_686c68f64c9d0181bd54d4d2e2446543c3eca1fa/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 3df370a6a991ded55d62dfcaf7055367c471f882..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-26_19:08:47_686c68f64c9d0181bd54d4d2e2446543c3eca1fa/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
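
The sweep dir pattern above, sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}, explains the directory names deleted throughout this diff: each run directory is keyed by the commit date and SHA under test, then the experiment name and job number (this repo appears to archive these trees under raw_results/). A sketch of the path construction, assuming COMMIT_DATE_GMT and COMMIT_SHA are environment variables set by the CI harness (an assumption based on the ${oc.env:...} interpolations):

    # Reconstruct a run directory name such as
    # 2023-08-26_19:08:47_686c68f64c9d0181bd54d4d2e2446543c3eca1fa/pytorch_bert_inference/0
    import os

    def sweep_dir(experiment_name: str, job_num: int) -> str:
        date = os.environ["COMMIT_DATE_GMT"]  # e.g. "2023-08-26_19:08:47"
        sha = os.environ["COMMIT_SHA"]        # commit under test
        return f"sweeps/{date}_{sha}/{experiment_name}/{job_num}"
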
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-26_19:08:47_686c68f64c9d0181bd54d4d2e2446543c3eca1fa/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-26_19:08:47_686c68f64c9d0181bd54d4d2e2446543c3eca1fa/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-26_19:08:47_686c68f64c9d0181bd54d4d2e2446543c3eca1fa/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-26_19:08:47_686c68f64c9d0181bd54d4d2e2446543c3eca1fa/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-26_19:08:47_686c68f64c9d0181bd54d4d2e2446543c3eca1fa/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-26_19:08:47_686c68f64c9d0181bd54d4d2e2446543c3eca1fa/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-26_19:08:47_686c68f64c9d0181bd54d4d2e2446543c3eca1fa/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-26_19:08:47_686c68f64c9d0181bd54d4d2e2446543c3eca1fa/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-26_19:08:47_686c68f64c9d0181bd54d4d2e2446543c3eca1fa/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index c54b4a2d7357b677442eced2efb46172d679e86d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-26_19:08:47_686c68f64c9d0181bd54d4d2e2446543c3eca1fa/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.144704,0.00362,276.0 diff --git a/raw_results/2023-08-26_19:08:47_686c68f64c9d0181bd54d4d2e2446543c3eca1fa/pytorch_bert_inference/0/main.log b/raw_results/2023-08-26_19:08:47_686c68f64c9d0181bd54d4d2e2446543c3eca1fa/pytorch_bert_inference/0/main.log deleted file mode 100644 index 724ec15fcb1c0692a978af9819e6b3624ecf14f6..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-26_19:08:47_686c68f64c9d0181bd54d4d2e2446543c3eca1fa/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-26 20:49:53,452][benchmark][INFO] - Configuring inference benchmark -[2023-08-26 20:49:53,453][benchmark][INFO] - + Setting seed(42) -[2023-08-26 20:49:54,849][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-26 20:49:54,850][backend][INFO] - Configuring pytorch backend -[2023-08-26 20:49:54,850][backend][INFO] - + Checking initial device isolation -[2023-08-26 20:49:54,850][backend][INFO] - + Checking contineous device isolation -[2023-08-26 20:49:54,850][pytorch][INFO] - + Disabling gradients -[2023-08-26 20:49:54,850][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-26 20:49:55,483][pytorch][INFO] - + Turning on eval mode -[2023-08-26 20:49:55,484][inference][INFO] - Running inference benchmark -[2023-08-26 20:49:55,613][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-26 20:49:55,615][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-26 20:49:55,678][inference][INFO] - + Forward pass peak memory: 467.144704 (MB) -[2023-08-26 20:49:55,679][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-26 20:49:55,681][inference][INFO] - + Warming up the forward pass -[2023-08-26 20:49:55,717][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-26 20:50:00,764][inference][INFO] - + Forward pass latency: 3.62e-03 (s) -[2023-08-26 20:50:00,765][inference][INFO] - + Forward pass throughput: 276.00 (samples/s) -[2023-08-26 20:50:00,765][inference][INFO] - Saving inference results -[2023-08-26 20:50:00,776][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-26_19:08:47_686c68f64c9d0181bd54d4d2e2446543c3eca1fa/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-26_19:08:47_686c68f64c9d0181bd54d4d2e2446543c3eca1fa/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-26_19:08:47_686c68f64c9d0181bd54d4d2e2446543c3eca1fa/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-26_19:08:47_686c68f64c9d0181bd54d4d2e2446543c3eca1fa/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-26_19:08:47_686c68f64c9d0181bd54d4d2e2446543c3eca1fa/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index afe5e529044006b633a6ed524e489b2dd31631e2..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-26_19:08:47_686c68f64c9d0181bd54d4d2e2446543c3eca1fa/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-26_19:08:47_686c68f64c9d0181bd54d4d2e2446543c3eca1fa/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-26_19:08:47_686c68f64c9d0181bd54d4d2e2446543c3eca1fa/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-08-26_19:08:47_686c68f64c9d0181bd54d4d2e2446543c3eca1fa/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-26_19:08:47_686c68f64c9d0181bd54d4d2e2446543c3eca1fa/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-26_19:08:47_686c68f64c9d0181bd54d4d2e2446543c3eca1fa/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-26_19:08:47_686c68f64c9d0181bd54d4d2e2446543c3eca1fa/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-26_19:08:47_686c68f64c9d0181bd54d4d2e2446543c3eca1fa/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-26_19:08:47_686c68f64c9d0181bd54d4d2e2446543c3eca1fa/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-26_19:08:47_686c68f64c9d0181bd54d4d2e2446543c3eca1fa/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 8dabf7c17c770d654c4ea6cae0beebd16ff60a9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-26_19:08:47_686c68f64c9d0181bd54d4d2e2446543c3eca1fa/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.127744,0.0042,952.0 diff --git a/raw_results/2023-08-26_19:08:47_686c68f64c9d0181bd54d4d2e2446543c3eca1fa/pytorch_bert_inference/1/main.log b/raw_results/2023-08-26_19:08:47_686c68f64c9d0181bd54d4d2e2446543c3eca1fa/pytorch_bert_inference/1/main.log deleted file mode 100644 index e683fd78645d1445754c4fecc232dfba07c1b92a..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-26_19:08:47_686c68f64c9d0181bd54d4d2e2446543c3eca1fa/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-26 20:50:01,151][benchmark][INFO] - Configuring inference benchmark -[2023-08-26 20:50:01,152][benchmark][INFO] - + Setting seed(42) -[2023-08-26 20:50:01,604][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-26 20:50:01,605][backend][INFO] - Configuring pytorch backend -[2023-08-26 20:50:01,605][backend][INFO] - + Checking initial device isolation -[2023-08-26 20:50:01,605][backend][INFO] - + Checking contineous device isolation -[2023-08-26 20:50:01,605][pytorch][INFO] - + Disabling gradients -[2023-08-26 20:50:01,605][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-26 20:50:01,729][pytorch][INFO] - + Turning on eval mode -[2023-08-26 20:50:01,730][inference][INFO] - Running inference benchmark -[2023-08-26 20:50:01,863][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-26 20:50:01,864][inference][INFO] - + Tracking forward pass peak memory -[2023-08-26 20:50:01,908][inference][INFO] - + Forward pass peak memory: 468.127744 (MB) -[2023-08-26 20:50:01,909][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-26 20:50:01,910][inference][INFO] - + Warming up the forward pass -[2023-08-26 20:50:01,953][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-26 20:50:06,996][inference][INFO] - + Forward pass latency: 4.20e-03 (s) -[2023-08-26 20:50:06,997][inference][INFO] - + Forward pass throughput: 952.00 (samples/s) -[2023-08-26 20:50:06,997][inference][INFO] - Saving inference results -[2023-08-26 20:50:07,005][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-26_19:08:47_686c68f64c9d0181bd54d4d2e2446543c3eca1fa/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-26_19:08:47_686c68f64c9d0181bd54d4d2e2446543c3eca1fa/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-26_19:08:47_686c68f64c9d0181bd54d4d2e2446543c3eca1fa/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: 
hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-26_19:08:47_686c68f64c9d0181bd54d4d2e2446543c3eca1fa/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-26_19:08:47_686c68f64c9d0181bd54d4d2e2446543c3eca1fa/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index e797a392819bf6b3184e6b6ef4d8c528f8bf3fc7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-26_19:08:47_686c68f64c9d0181bd54d4d2e2446543c3eca1fa/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-26_19:08:47_686c68f64c9d0181bd54d4d2e2446543c3eca1fa/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-26_19:08:47_686c68f64c9d0181bd54d4d2e2446543c3eca1fa/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-26_19:08:47_686c68f64c9d0181bd54d4d2e2446543c3eca1fa/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-26_19:08:47_686c68f64c9d0181bd54d4d2e2446543c3eca1fa/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-26_19:08:47_686c68f64c9d0181bd54d4d2e2446543c3eca1fa/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-26_19:08:47_686c68f64c9d0181bd54d4d2e2446543c3eca1fa/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-26_19:08:47_686c68f64c9d0181bd54d4d2e2446543c3eca1fa/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-26_19:08:47_686c68f64c9d0181bd54d4d2e2446543c3eca1fa/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-26_19:08:47_686c68f64c9d0181bd54d4d2e2446543c3eca1fa/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index a788b2c9e9cdf64f7d956df53df09b648aba5797..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-26_19:08:47_686c68f64c9d0181bd54d4d2e2446543c3eca1fa/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.36064,0.00379,264.0,0.493,203.0 diff --git a/raw_results/2023-08-26_19:08:47_686c68f64c9d0181bd54d4d2e2446543c3eca1fa/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-26_19:08:47_686c68f64c9d0181bd54d4d2e2446543c3eca1fa/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index b58d0017fa2350349eae145588b62d43b3219f4e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-26_19:08:47_686c68f64c9d0181bd54d4d2e2446543c3eca1fa/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-26 20:50:11,786][benchmark][INFO] - Configuring inference benchmark -[2023-08-26 20:50:11,788][benchmark][INFO] - + Setting seed(42) -[2023-08-26 20:50:13,219][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-26 20:50:13,219][backend][INFO] - Configuring pytorch backend -[2023-08-26 20:50:13,219][backend][INFO] - + Checking initial device isolation -[2023-08-26 20:50:13,219][backend][INFO] - + Checking contineous device isolation -[2023-08-26 20:50:13,220][pytorch][INFO] - + Disabling gradients -[2023-08-26 20:50:13,220][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-26 20:50:13,874][pytorch][INFO] - + Turning on eval mode -[2023-08-26 20:50:13,875][inference][INFO] - Running inference benchmark -[2023-08-26 20:50:14,071][inference][INFO] - + Tracking forward pass peak memory -[2023-08-26 20:50:14,119][inference][INFO] - + Forward pass peak memory: 469.36064 (MB) -[2023-08-26 20:50:14,121][inference][INFO] - + Warming up the forward pass 
-[2023-08-26 20:50:14,153][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-26 20:50:19,197][inference][INFO] - + Forward pass latency: 3.79e-03 (s) -[2023-08-26 20:50:19,199][inference][INFO] - + Forward pass throughput: 264.00 (samples/s) -[2023-08-26 20:50:19,199][inference][INFO] - + Warming up the generation pass -[2023-08-26 20:50:19,796][inference][INFO] - + Tracking generation latency and throughput -[2023-08-26 20:50:25,225][inference][INFO] - + Generation pass latency: 4.93e-01 (s) -[2023-08-26 20:50:25,227][inference][INFO] - + Generation pass throughput: 203.00 (tokens/s) -[2023-08-26 20:50:25,227][inference][INFO] - Saving inference results -[2023-08-26 20:50:25,238][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-28_10:07:31_de139702a17003c7dd02e671a9a7417d346c3df2/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-28_10:07:31_de139702a17003c7dd02e671a9a7417d346c3df2/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_10:07:31_de139702a17003c7dd02e671a9a7417d346c3df2/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-28_10:07:31_de139702a17003c7dd02e671a9a7417d346c3df2/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-28_10:07:31_de139702a17003c7dd02e671a9a7417d346c3df2/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index ee164adc260b46cf707fd8c9a1b16b5ec9b39682..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_10:07:31_de139702a17003c7dd02e671a9a7417d346c3df2/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-28_10:07:31_de139702a17003c7dd02e671a9a7417d346c3df2/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-28_10:07:31_de139702a17003c7dd02e671a9a7417d346c3df2/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-28_10:07:31_de139702a17003c7dd02e671a9a7417d346c3df2/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_10:07:31_de139702a17003c7dd02e671a9a7417d346c3df2/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-28_10:07:31_de139702a17003c7dd02e671a9a7417d346c3df2/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-28_10:07:31_de139702a17003c7dd02e671a9a7417d346c3df2/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_10:07:31_de139702a17003c7dd02e671a9a7417d346c3df2/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-28_10:07:31_de139702a17003c7dd02e671a9a7417d346c3df2/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-28_10:07:31_de139702a17003c7dd02e671a9a7417d346c3df2/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index fb02325a5257ceae680b22f23029a2a3a667453b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_10:07:31_de139702a17003c7dd02e671a9a7417d346c3df2/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.66547199999997,0.00316,316.0 diff --git a/raw_results/2023-08-28_10:07:31_de139702a17003c7dd02e671a9a7417d346c3df2/pytorch_bert_inference/0/main.log b/raw_results/2023-08-28_10:07:31_de139702a17003c7dd02e671a9a7417d346c3df2/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
9f2ba38738acf309693f9dca60d13120e9c8b1c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_10:07:31_de139702a17003c7dd02e671a9a7417d346c3df2/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-28 10:49:58,951][benchmark][INFO] - Configuring inference benchmark -[2023-08-28 10:49:58,952][benchmark][INFO] - + Setting seed(42) -[2023-08-28 10:50:00,366][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-28 10:50:00,366][backend][INFO] - Configuring pytorch backend -[2023-08-28 10:50:00,366][backend][INFO] - + Checking initial device isolation -[2023-08-28 10:50:00,366][backend][INFO] - + Checking contineous device isolation -[2023-08-28 10:50:00,367][pytorch][INFO] - + Disabling gradients -[2023-08-28 10:50:00,367][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-28 10:50:00,988][pytorch][INFO] - + Turning on eval mode -[2023-08-28 10:50:00,988][inference][INFO] - Running inference benchmark -[2023-08-28 10:50:01,114][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-28 10:50:01,115][inference][INFO] - + Tracking forward pass peak memory -[2023-08-28 10:50:01,174][inference][INFO] - + Forward pass peak memory: 466.66547199999997 (MB) -[2023-08-28 10:50:01,175][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-28 10:50:01,177][inference][INFO] - + Warming up the forward pass -[2023-08-28 10:50:01,209][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-28 10:50:06,259][inference][INFO] - + Forward pass latency: 3.16e-03 (s) -[2023-08-28 10:50:06,261][inference][INFO] - + Forward pass throughput: 316.00 (samples/s) -[2023-08-28 10:50:06,261][inference][INFO] - Saving inference results -[2023-08-28 10:50:06,272][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-28_10:07:31_de139702a17003c7dd02e671a9a7417d346c3df2/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-28_10:07:31_de139702a17003c7dd02e671a9a7417d346c3df2/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_10:07:31_de139702a17003c7dd02e671a9a7417d346c3df2/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-28_10:07:31_de139702a17003c7dd02e671a9a7417d346c3df2/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-28_10:07:31_de139702a17003c7dd02e671a9a7417d346c3df2/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 34859b6db5d472b8911237e44d14dd37facd8041..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_10:07:31_de139702a17003c7dd02e671a9a7417d346c3df2/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-28_10:07:31_de139702a17003c7dd02e671a9a7417d346c3df2/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-28_10:07:31_de139702a17003c7dd02e671a9a7417d346c3df2/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-28_10:07:31_de139702a17003c7dd02e671a9a7417d346c3df2/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_10:07:31_de139702a17003c7dd02e671a9a7417d346c3df2/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-28_10:07:31_de139702a17003c7dd02e671a9a7417d346c3df2/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-28_10:07:31_de139702a17003c7dd02e671a9a7417d346c3df2/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_10:07:31_de139702a17003c7dd02e671a9a7417d346c3df2/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-28_10:07:31_de139702a17003c7dd02e671a9a7417d346c3df2/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-28_10:07:31_de139702a17003c7dd02e671a9a7417d346c3df2/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 68fb99b036fb4cd5845d943dee1d7cb33ee38e07..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_10:07:31_de139702a17003c7dd02e671a9a7417d346c3df2/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.73043199999995,0.00354,1130.0 diff --git a/raw_results/2023-08-28_10:07:31_de139702a17003c7dd02e671a9a7417d346c3df2/pytorch_bert_inference/1/main.log b/raw_results/2023-08-28_10:07:31_de139702a17003c7dd02e671a9a7417d346c3df2/pytorch_bert_inference/1/main.log deleted file mode 100644 index 56c261d13b9594ee5784e087cf2d0cfacb287ad6..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_10:07:31_de139702a17003c7dd02e671a9a7417d346c3df2/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-28 10:50:06,651][benchmark][INFO] - Configuring inference benchmark -[2023-08-28 10:50:06,652][benchmark][INFO] - + Setting seed(42) -[2023-08-28 10:50:07,106][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-28 10:50:07,106][backend][INFO] - Configuring pytorch backend -[2023-08-28 10:50:07,107][backend][INFO] - + Checking initial device isolation -[2023-08-28 10:50:07,107][backend][INFO] - + Checking contineous device isolation -[2023-08-28 10:50:07,107][pytorch][INFO] - + Disabling gradients -[2023-08-28 10:50:07,107][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-28 10:50:07,231][pytorch][INFO] - + Turning on eval mode -[2023-08-28 10:50:07,232][inference][INFO] - Running inference benchmark -[2023-08-28 10:50:07,355][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-28 10:50:07,356][inference][INFO] - + Tracking forward 
pass peak memory -[2023-08-28 10:50:07,399][inference][INFO] - + Forward pass peak memory: 467.73043199999995 (MB) -[2023-08-28 10:50:07,400][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-28 10:50:07,402][inference][INFO] - + Warming up the forward pass -[2023-08-28 10:50:07,454][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-28 10:50:12,500][inference][INFO] - + Forward pass latency: 3.54e-03 (s) -[2023-08-28 10:50:12,502][inference][INFO] - + Forward pass throughput: 1130.00 (samples/s) -[2023-08-28 10:50:12,502][inference][INFO] - Saving inference results -[2023-08-28 10:50:12,511][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-28_10:07:31_de139702a17003c7dd02e671a9a7417d346c3df2/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-28_10:07:31_de139702a17003c7dd02e671a9a7417d346c3df2/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_10:07:31_de139702a17003c7dd02e671a9a7417d346c3df2/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-28_10:07:31_de139702a17003c7dd02e671a9a7417d346c3df2/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-28_10:07:31_de139702a17003c7dd02e671a9a7417d346c3df2/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index aa51c1766b2c3af90d3e16df6730b12df4c63694..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_10:07:31_de139702a17003c7dd02e671a9a7417d346c3df2/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - 
launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-28_10:07:31_de139702a17003c7dd02e671a9a7417d346c3df2/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-28_10:07:31_de139702a17003c7dd02e671a9a7417d346c3df2/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-28_10:07:31_de139702a17003c7dd02e671a9a7417d346c3df2/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_10:07:31_de139702a17003c7dd02e671a9a7417d346c3df2/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-28_10:07:31_de139702a17003c7dd02e671a9a7417d346c3df2/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-28_10:07:31_de139702a17003c7dd02e671a9a7417d346c3df2/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_10:07:31_de139702a17003c7dd02e671a9a7417d346c3df2/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-28_10:07:31_de139702a17003c7dd02e671a9a7417d346c3df2/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-28_10:07:31_de139702a17003c7dd02e671a9a7417d346c3df2/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 3d4f2ce5b918241a08a1b12b31c1b018340df0e3..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_10:07:31_de139702a17003c7dd02e671a9a7417d346c3df2/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.09439999999995,0.00377,265.0,0.513,195.0 diff --git a/raw_results/2023-08-28_10:07:31_de139702a17003c7dd02e671a9a7417d346c3df2/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-28_10:07:31_de139702a17003c7dd02e671a9a7417d346c3df2/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 4adc0412070e83ee4b534290279b9376ea3abe6b..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-28_10:07:31_de139702a17003c7dd02e671a9a7417d346c3df2/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-28 10:50:17,280][benchmark][INFO] - Configuring inference benchmark -[2023-08-28 10:50:17,281][benchmark][INFO] - + Setting seed(42) -[2023-08-28 10:50:19,086][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-28 10:50:19,087][backend][INFO] - Configuring pytorch backend -[2023-08-28 10:50:19,087][backend][INFO] - + Checking initial device isolation -[2023-08-28 10:50:19,087][backend][INFO] - + Checking contineous device isolation -[2023-08-28 10:50:19,087][pytorch][INFO] - + Disabling gradients -[2023-08-28 10:50:19,088][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-28 10:50:19,779][pytorch][INFO] - + Turning on eval mode -[2023-08-28 10:50:19,779][inference][INFO] - Running inference benchmark -[2023-08-28 10:50:19,977][inference][INFO] - + Tracking forward pass peak memory -[2023-08-28 10:50:20,023][inference][INFO] - + Forward pass peak memory: 469.09439999999995 (MB) -[2023-08-28 10:50:20,025][inference][INFO] - + Warming up the forward pass -[2023-08-28 10:50:20,056][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-28 10:50:25,101][inference][INFO] - + Forward pass latency: 3.77e-03 (s) -[2023-08-28 10:50:25,103][inference][INFO] - + Forward pass throughput: 265.00 (samples/s) -[2023-08-28 10:50:25,104][inference][INFO] - + Warming up the generation pass -[2023-08-28 10:50:25,690][inference][INFO] - + Tracking generation latency and throughput -[2023-08-28 10:50:30,819][inference][INFO] - + Generation pass latency: 5.13e-01 (s) -[2023-08-28 10:50:30,820][inference][INFO] - + Generation pass throughput: 195.00 (tokens/s) -[2023-08-28 10:50:30,820][inference][INFO] - Saving inference results -[2023-08-28 10:50:30,834][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-28_12:58:23_cb91ec67b54c1a8a9a24825165161c90fe7c0e51/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-28_12:58:23_cb91ec67b54c1a8a9a24825165161c90fe7c0e51/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_12:58:23_cb91ec67b54c1a8a9a24825165161c90fe7c0e51/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-28_12:58:23_cb91ec67b54c1a8a9a24825165161c90fe7c0e51/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-28_12:58:23_cb91ec67b54c1a8a9a24825165161c90fe7c0e51/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 96c4d67f43905fdd1fc94a35070b04086ab19258..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_12:58:23_cb91ec67b54c1a8a9a24825165161c90fe7c0e51/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-28_12:58:23_cb91ec67b54c1a8a9a24825165161c90fe7c0e51/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-28_12:58:23_cb91ec67b54c1a8a9a24825165161c90fe7c0e51/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-28_12:58:23_cb91ec67b54c1a8a9a24825165161c90fe7c0e51/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_12:58:23_cb91ec67b54c1a8a9a24825165161c90fe7c0e51/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-28_12:58:23_cb91ec67b54c1a8a9a24825165161c90fe7c0e51/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-28_12:58:23_cb91ec67b54c1a8a9a24825165161c90fe7c0e51/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_12:58:23_cb91ec67b54c1a8a9a24825165161c90fe7c0e51/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-28_12:58:23_cb91ec67b54c1a8a9a24825165161c90fe7c0e51/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-28_12:58:23_cb91ec67b54c1a8a9a24825165161c90fe7c0e51/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 859337cd3ce7dc19b02c4bec1143e22024b73253..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_12:58:23_cb91ec67b54c1a8a9a24825165161c90fe7c0e51/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.915328,0.00385,260.0 diff --git a/raw_results/2023-08-28_12:58:23_cb91ec67b54c1a8a9a24825165161c90fe7c0e51/pytorch_bert_inference/0/main.log b/raw_results/2023-08-28_12:58:23_cb91ec67b54c1a8a9a24825165161c90fe7c0e51/pytorch_bert_inference/0/main.log deleted file mode 100644 index c55078e936333e2393c773534b14ab59e50ee668..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_12:58:23_cb91ec67b54c1a8a9a24825165161c90fe7c0e51/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-28 14:49:44,230][benchmark][INFO] - Configuring inference benchmark -[2023-08-28 14:49:44,231][benchmark][INFO] - + Setting seed(42) -[2023-08-28 14:49:45,467][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-28 14:49:45,467][backend][INFO] - Configuring pytorch backend -[2023-08-28 14:49:45,467][backend][INFO] - + Checking initial device isolation -[2023-08-28 14:49:45,467][backend][INFO] - + Checking contineous device isolation -[2023-08-28 14:49:45,468][pytorch][INFO] - + Disabling gradients -[2023-08-28 14:49:45,468][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-28 14:49:46,095][pytorch][INFO] - + Turning on eval mode -[2023-08-28 14:49:46,096][inference][INFO] - Running inference benchmark -[2023-08-28 14:49:46,217][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-28 14:49:46,218][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-28 14:49:46,279][inference][INFO] - + Forward pass peak memory: 466.915328 (MB) -[2023-08-28 14:49:46,280][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-28 14:49:46,282][inference][INFO] - + Warming up the forward pass -[2023-08-28 14:49:46,319][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-28 14:49:51,364][inference][INFO] - + Forward pass latency: 3.85e-03 (s) -[2023-08-28 14:49:51,366][inference][INFO] - + Forward pass throughput: 260.00 (samples/s) -[2023-08-28 14:49:51,366][inference][INFO] - Saving inference results -[2023-08-28 14:49:51,378][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-28_12:58:23_cb91ec67b54c1a8a9a24825165161c90fe7c0e51/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-28_12:58:23_cb91ec67b54c1a8a9a24825165161c90fe7c0e51/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_12:58:23_cb91ec67b54c1a8a9a24825165161c90fe7c0e51/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-28_12:58:23_cb91ec67b54c1a8a9a24825165161c90fe7c0e51/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-28_12:58:23_cb91ec67b54c1a8a9a24825165161c90fe7c0e51/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 4b9308652303003d0893fe8024fbe9eaaccdb303..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_12:58:23_cb91ec67b54c1a8a9a24825165161c90fe7c0e51/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-28_12:58:23_cb91ec67b54c1a8a9a24825165161c90fe7c0e51/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-28_12:58:23_cb91ec67b54c1a8a9a24825165161c90fe7c0e51/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-08-28_12:58:23_cb91ec67b54c1a8a9a24825165161c90fe7c0e51/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_12:58:23_cb91ec67b54c1a8a9a24825165161c90fe7c0e51/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-28_12:58:23_cb91ec67b54c1a8a9a24825165161c90fe7c0e51/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-28_12:58:23_cb91ec67b54c1a8a9a24825165161c90fe7c0e51/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_12:58:23_cb91ec67b54c1a8a9a24825165161c90fe7c0e51/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-28_12:58:23_cb91ec67b54c1a8a9a24825165161c90fe7c0e51/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-28_12:58:23_cb91ec67b54c1a8a9a24825165161c90fe7c0e51/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 2f533c5571c02e87801492e9c706559dd9f57bee..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_12:58:23_cb91ec67b54c1a8a9a24825165161c90fe7c0e51/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.8656,0.00427,937.0 diff --git a/raw_results/2023-08-28_12:58:23_cb91ec67b54c1a8a9a24825165161c90fe7c0e51/pytorch_bert_inference/1/main.log b/raw_results/2023-08-28_12:58:23_cb91ec67b54c1a8a9a24825165161c90fe7c0e51/pytorch_bert_inference/1/main.log deleted file mode 100644 index 5390fce4869074458e1f397f4c99e4edbb754ec2..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-28_12:58:23_cb91ec67b54c1a8a9a24825165161c90fe7c0e51/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-28 14:49:51,763][benchmark][INFO] - Configuring inference benchmark -[2023-08-28 14:49:51,764][benchmark][INFO] - + Setting seed(42) -[2023-08-28 14:49:52,194][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-28 14:49:52,194][backend][INFO] - Configuring pytorch backend -[2023-08-28 14:49:52,194][backend][INFO] - + Checking initial device isolation -[2023-08-28 14:49:52,195][backend][INFO] - + Checking contineous device isolation -[2023-08-28 14:49:52,195][pytorch][INFO] - + Disabling gradients -[2023-08-28 14:49:52,195][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-28 14:49:52,314][pytorch][INFO] - + Turning on eval mode -[2023-08-28 14:49:52,315][inference][INFO] - Running inference benchmark -[2023-08-28 14:49:52,443][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-28 14:49:52,444][inference][INFO] - + Tracking forward pass peak memory -[2023-08-28 14:49:52,484][inference][INFO] - + Forward pass peak memory: 467.8656 (MB) -[2023-08-28 14:49:52,485][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-28 14:49:52,487][inference][INFO] - + Warming up the forward pass -[2023-08-28 14:49:52,530][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-28 14:49:57,572][inference][INFO] - + Forward pass latency: 4.27e-03 (s) -[2023-08-28 14:49:57,572][inference][INFO] - + Forward pass throughput: 937.00 (samples/s) -[2023-08-28 14:49:57,573][inference][INFO] - Saving inference results -[2023-08-28 14:49:57,580][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-28_12:58:23_cb91ec67b54c1a8a9a24825165161c90fe7c0e51/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-28_12:58:23_cb91ec67b54c1a8a9a24825165161c90fe7c0e51/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_12:58:23_cb91ec67b54c1a8a9a24825165161c90fe7c0e51/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: 
hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-28_12:58:23_cb91ec67b54c1a8a9a24825165161c90fe7c0e51/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-28_12:58:23_cb91ec67b54c1a8a9a24825165161c90fe7c0e51/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 7b2e6cad22bbd053a24c5012b5f0fcb60319d7ef..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_12:58:23_cb91ec67b54c1a8a9a24825165161c90fe7c0e51/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
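
The inference_results.csv files scattered through this diff are directly machine-readable: one row per sweep job, an unnamed index column, then the tracked metrics. A minimal sketch of loading one, assuming pandas is available; the path is copied verbatim from the diff above:

```python
# Minimal sketch (not part of the deleted files): load one inference_results.csv
# row shown above. Column names follow the CSV header exactly:
# forward.peak_memory(MB), forward.latency(s), forward.throughput(samples/s).
import pandas as pd

path = (
    "raw_results/2023-08-28_12:58:23_cb91ec67b54c1a8a9a24825165161c90fe7c0e51/"
    "pytorch_bert_inference/1/inference_results.csv"
)
df = pd.read_csv(path, index_col=0)  # first (unnamed) column is the row index

print(df["forward.latency(s)"].iloc[0])             # 0.00427
print(df["forward.throughput(samples/s)"].iloc[0])  # 937.0
```
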
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-28_12:58:23_cb91ec67b54c1a8a9a24825165161c90fe7c0e51/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-28_12:58:23_cb91ec67b54c1a8a9a24825165161c90fe7c0e51/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-28_12:58:23_cb91ec67b54c1a8a9a24825165161c90fe7c0e51/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_12:58:23_cb91ec67b54c1a8a9a24825165161c90fe7c0e51/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-28_12:58:23_cb91ec67b54c1a8a9a24825165161c90fe7c0e51/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-28_12:58:23_cb91ec67b54c1a8a9a24825165161c90fe7c0e51/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_12:58:23_cb91ec67b54c1a8a9a24825165161c90fe7c0e51/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-28_12:58:23_cb91ec67b54c1a8a9a24825165161c90fe7c0e51/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-28_12:58:23_cb91ec67b54c1a8a9a24825165161c90fe7c0e51/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 8268b6dced0cda17a8be5eb733f475de5ab83f4b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_12:58:23_cb91ec67b54c1a8a9a24825165161c90fe7c0e51/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.311488,0.00382,262.0,0.486,206.0 diff --git a/raw_results/2023-08-28_12:58:23_cb91ec67b54c1a8a9a24825165161c90fe7c0e51/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-28_12:58:23_cb91ec67b54c1a8a9a24825165161c90fe7c0e51/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index ef62b59ba3b7d11259c88bf8c7fadb368f4e309d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_12:58:23_cb91ec67b54c1a8a9a24825165161c90fe7c0e51/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-28 14:50:02,334][benchmark][INFO] - Configuring inference benchmark -[2023-08-28 14:50:02,335][benchmark][INFO] - + Setting seed(42) -[2023-08-28 14:50:03,890][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-28 14:50:03,890][backend][INFO] - Configuring pytorch backend -[2023-08-28 14:50:03,890][backend][INFO] - + Checking initial device isolation -[2023-08-28 14:50:03,890][backend][INFO] - + Checking contineous device isolation -[2023-08-28 14:50:03,891][pytorch][INFO] - + Disabling gradients -[2023-08-28 14:50:03,891][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-28 14:50:04,535][pytorch][INFO] - + Turning on eval mode -[2023-08-28 14:50:04,535][inference][INFO] - Running inference benchmark -[2023-08-28 14:50:04,741][inference][INFO] - + Tracking forward pass peak memory -[2023-08-28 14:50:04,791][inference][INFO] - + Forward pass peak memory: 469.311488 (MB) -[2023-08-28 14:50:04,792][inference][INFO] - + Warming up the forward pass 
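
The reported throughputs in the gpt2 results above are consistent with throughput ≈ batch_size / forward latency and, for generation, new_tokens / generation latency (at batch size 1 the per-sample and per-batch readings coincide). A quick check with the batch-size-1 values copied from the CSV and log above:

```python
# Sanity-check of the gpt2 batch_size=1 numbers reported above. The benchmark
# appears to round throughput to three significant figures; plain round()
# suffices at these magnitudes.
batch_size = 1            # benchmark.input_shapes.batch_size
new_tokens = 100          # from the benchmark config above
forward_latency = 0.00382   # s, from inference_results.csv
generate_latency = 0.486    # s, from inference_results.csv

print(round(batch_size / forward_latency))                # 262 (reported: 262.0 samples/s)
print(round(new_tokens * batch_size / generate_latency))  # 206 (reported: 206.0 tokens/s)
```
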
-[2023-08-28 14:50:04,825][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-28 14:50:09,872][inference][INFO] - + Forward pass latency: 3.82e-03 (s) -[2023-08-28 14:50:09,873][inference][INFO] - + Forward pass throughput: 262.00 (samples/s) -[2023-08-28 14:50:09,874][inference][INFO] - + Warming up the generation pass -[2023-08-28 14:50:10,424][inference][INFO] - + Tracking generation latency and throughput -[2023-08-28 14:50:15,774][inference][INFO] - + Generation pass latency: 4.86e-01 (s) -[2023-08-28 14:50:15,775][inference][INFO] - + Generation pass throughput: 206.00 (tokens/s) -[2023-08-28 14:50:15,775][inference][INFO] - Saving inference results -[2023-08-28 14:50:15,786][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-28_13:31:22_ed915cff9751e3e41ebb4733b87c45c938daf116/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-28_13:31:22_ed915cff9751e3e41ebb4733b87c45c938daf116/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_13:31:22_ed915cff9751e3e41ebb4733b87c45c938daf116/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-28_13:31:22_ed915cff9751e3e41ebb4733b87c45c938daf116/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-28_13:31:22_ed915cff9751e3e41ebb4733b87c45c938daf116/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index d4065eb13216e5b1443c11fd09067d3eb4bf1f7f..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_13:31:22_ed915cff9751e3e41ebb4733b87c45c938daf116/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-28_13:31:22_ed915cff9751e3e41ebb4733b87c45c938daf116/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-28_13:31:22_ed915cff9751e3e41ebb4733b87c45c938daf116/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-28_13:31:22_ed915cff9751e3e41ebb4733b87c45c938daf116/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_13:31:22_ed915cff9751e3e41ebb4733b87c45c938daf116/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-28_13:31:22_ed915cff9751e3e41ebb4733b87c45c938daf116/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-28_13:31:22_ed915cff9751e3e41ebb4733b87c45c938daf116/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_13:31:22_ed915cff9751e3e41ebb4733b87c45c938daf116/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-28_13:31:22_ed915cff9751e3e41ebb4733b87c45c938daf116/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-28_13:31:22_ed915cff9751e3e41ebb4733b87c45c938daf116/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 41943464d1c455eed2c6bf4abde39eb6b0c90187..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_13:31:22_ed915cff9751e3e41ebb4733b87c45c938daf116/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.02649599999995,0.00313,319.0 diff --git a/raw_results/2023-08-28_13:31:22_ed915cff9751e3e41ebb4733b87c45c938daf116/pytorch_bert_inference/0/main.log b/raw_results/2023-08-28_13:31:22_ed915cff9751e3e41ebb4733b87c45c938daf116/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
42b2427b62cfcc26196787e5e74251bcd0d99e36..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_13:31:22_ed915cff9751e3e41ebb4733b87c45c938daf116/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-28 14:51:24,329][benchmark][INFO] - Configuring inference benchmark -[2023-08-28 14:51:24,330][benchmark][INFO] - + Setting seed(42) -[2023-08-28 14:51:25,587][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-28 14:51:25,587][backend][INFO] - Configuring pytorch backend -[2023-08-28 14:51:25,588][backend][INFO] - + Checking initial device isolation -[2023-08-28 14:51:25,588][backend][INFO] - + Checking contineous device isolation -[2023-08-28 14:51:25,588][pytorch][INFO] - + Disabling gradients -[2023-08-28 14:51:25,588][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-28 14:51:26,316][pytorch][INFO] - + Turning on eval mode -[2023-08-28 14:51:26,317][inference][INFO] - Running inference benchmark -[2023-08-28 14:51:26,441][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-28 14:51:26,443][inference][INFO] - + Tracking forward pass peak memory -[2023-08-28 14:51:26,503][inference][INFO] - + Forward pass peak memory: 466.02649599999995 (MB) -[2023-08-28 14:51:26,504][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-28 14:51:26,506][inference][INFO] - + Warming up the forward pass -[2023-08-28 14:51:26,542][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-28 14:51:31,593][inference][INFO] - + Forward pass latency: 3.13e-03 (s) -[2023-08-28 14:51:31,594][inference][INFO] - + Forward pass throughput: 319.00 (samples/s) -[2023-08-28 14:51:31,594][inference][INFO] - Saving inference results -[2023-08-28 14:51:31,607][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-28_13:31:22_ed915cff9751e3e41ebb4733b87c45c938daf116/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-28_13:31:22_ed915cff9751e3e41ebb4733b87c45c938daf116/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_13:31:22_ed915cff9751e3e41ebb4733b87c45c938daf116/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-28_13:31:22_ed915cff9751e3e41ebb4733b87c45c938daf116/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-28_13:31:22_ed915cff9751e3e41ebb4733b87c45c938daf116/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index b612de80992b4707cde447d6ad798b20acadcb8b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_13:31:22_ed915cff9751e3e41ebb4733b87c45c938daf116/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
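
The sweeper block above (params: benchmark.input_shapes.batch_size: 1,4 under mode: MULTIRUN) is what produced the paired job directories 0 and 1 and their .config/overrides.yaml files in this diff. A hedged illustration of how Hydra's BasicSweeper expands such a comma-separated sweep into per-job override lists; this mimics the observable behavior rather than invoking Hydra itself:

```python
# Illustrative only: expand a Hydra basic-sweeper param into per-job overrides,
# matching the overrides.yaml contents and job numbering seen in this diff.
sweep_params = {"benchmark.input_shapes.batch_size": "1,4"}

jobs = []
for key, csv_values in sweep_params.items():
    for num, value in enumerate(csv_values.split(",")):
        jobs.append((num, [f"{key}={value}"]))

for num, overrides in jobs:
    print(num, overrides)
# 0 ['benchmark.input_shapes.batch_size=1']
# 1 ['benchmark.input_shapes.batch_size=4']
```
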
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-28_13:31:22_ed915cff9751e3e41ebb4733b87c45c938daf116/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-28_13:31:22_ed915cff9751e3e41ebb4733b87c45c938daf116/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-28_13:31:22_ed915cff9751e3e41ebb4733b87c45c938daf116/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_13:31:22_ed915cff9751e3e41ebb4733b87c45c938daf116/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-28_13:31:22_ed915cff9751e3e41ebb4733b87c45c938daf116/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-28_13:31:22_ed915cff9751e3e41ebb4733b87c45c938daf116/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_13:31:22_ed915cff9751e3e41ebb4733b87c45c938daf116/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-28_13:31:22_ed915cff9751e3e41ebb4733b87c45c938daf116/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-28_13:31:22_ed915cff9751e3e41ebb4733b87c45c938daf116/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 19020ed2d7d4aed9105a266dfd6862af81b4ed37..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_13:31:22_ed915cff9751e3e41ebb4733b87c45c938daf116/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.021824,0.00345,1160.0 diff --git a/raw_results/2023-08-28_13:31:22_ed915cff9751e3e41ebb4733b87c45c938daf116/pytorch_bert_inference/1/main.log b/raw_results/2023-08-28_13:31:22_ed915cff9751e3e41ebb4733b87c45c938daf116/pytorch_bert_inference/1/main.log deleted file mode 100644 index 8aa45cf5208936b624d9d0524de4c72bf6cdce38..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_13:31:22_ed915cff9751e3e41ebb4733b87c45c938daf116/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-28 14:51:31,978][benchmark][INFO] - Configuring inference benchmark -[2023-08-28 14:51:31,979][benchmark][INFO] - + Setting seed(42) -[2023-08-28 14:51:32,424][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-28 14:51:32,424][backend][INFO] - Configuring pytorch backend -[2023-08-28 14:51:32,425][backend][INFO] - + Checking initial device isolation -[2023-08-28 14:51:32,425][backend][INFO] - + Checking contineous device isolation -[2023-08-28 14:51:32,425][pytorch][INFO] - + Disabling gradients -[2023-08-28 14:51:32,425][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-28 14:51:32,634][pytorch][INFO] - + Turning on eval mode -[2023-08-28 14:51:32,634][inference][INFO] - Running inference benchmark -[2023-08-28 14:51:32,888][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-28 14:51:32,889][inference][INFO] - + Tracking forward pass 
peak memory -[2023-08-28 14:51:32,932][inference][INFO] - + Forward pass peak memory: 467.021824 (MB) -[2023-08-28 14:51:32,933][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-28 14:51:32,934][inference][INFO] - + Warming up the forward pass -[2023-08-28 14:51:32,973][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-28 14:51:38,018][inference][INFO] - + Forward pass latency: 3.45e-03 (s) -[2023-08-28 14:51:38,019][inference][INFO] - + Forward pass throughput: 1160.00 (samples/s) -[2023-08-28 14:51:38,019][inference][INFO] - Saving inference results -[2023-08-28 14:51:38,026][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-28_13:31:22_ed915cff9751e3e41ebb4733b87c45c938daf116/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-28_13:31:22_ed915cff9751e3e41ebb4733b87c45c938daf116/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_13:31:22_ed915cff9751e3e41ebb4733b87c45c938daf116/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-28_13:31:22_ed915cff9751e3e41ebb4733b87c45c938daf116/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-28_13:31:22_ed915cff9751e3e41ebb4733b87c45c938daf116/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index e07702f224e7f8ffcc743ed2ccacf2363ae36bcb..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_13:31:22_ed915cff9751e3e41ebb4733b87c45c938daf116/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-28_13:31:22_ed915cff9751e3e41ebb4733b87c45c938daf116/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-28_13:31:22_ed915cff9751e3e41ebb4733b87c45c938daf116/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-28_13:31:22_ed915cff9751e3e41ebb4733b87c45c938daf116/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_13:31:22_ed915cff9751e3e41ebb4733b87c45c938daf116/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-28_13:31:22_ed915cff9751e3e41ebb4733b87c45c938daf116/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-28_13:31:22_ed915cff9751e3e41ebb4733b87c45c938daf116/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_13:31:22_ed915cff9751e3e41ebb4733b87c45c938daf116/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-28_13:31:22_ed915cff9751e3e41ebb4733b87c45c938daf116/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-28_13:31:22_ed915cff9751e3e41ebb4733b87c45c938daf116/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index daf33b067f00163cfa25e8fd9dfca4045d52d1fc..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_13:31:22_ed915cff9751e3e41ebb4733b87c45c938daf116/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.090304,0.00387,258.0,0.513,195.0 diff --git a/raw_results/2023-08-28_13:31:22_ed915cff9751e3e41ebb4733b87c45c938daf116/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-28_13:31:22_ed915cff9751e3e41ebb4733b87c45c938daf116/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 4d2d9e6cef59ff1d72d217a9b5b89d25e9f4c5f7..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-28_13:31:22_ed915cff9751e3e41ebb4733b87c45c938daf116/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-28 14:51:42,952][benchmark][INFO] - Configuring inference benchmark -[2023-08-28 14:51:42,953][benchmark][INFO] - + Setting seed(42) -[2023-08-28 14:51:44,385][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-28 14:51:44,385][backend][INFO] - Configuring pytorch backend -[2023-08-28 14:51:44,386][backend][INFO] - + Checking initial device isolation -[2023-08-28 14:51:44,386][backend][INFO] - + Checking contineous device isolation -[2023-08-28 14:51:44,386][pytorch][INFO] - + Disabling gradients -[2023-08-28 14:51:44,386][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-28 14:51:45,035][pytorch][INFO] - + Turning on eval mode -[2023-08-28 14:51:45,035][inference][INFO] - Running inference benchmark -[2023-08-28 14:51:45,249][inference][INFO] - + Tracking forward pass peak memory -[2023-08-28 14:51:45,299][inference][INFO] - + Forward pass peak memory: 469.090304 (MB) -[2023-08-28 14:51:45,301][inference][INFO] - + Warming up the forward pass -[2023-08-28 14:51:45,338][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-28 14:51:50,386][inference][INFO] - + Forward pass latency: 3.87e-03 (s) -[2023-08-28 14:51:50,388][inference][INFO] - + Forward pass throughput: 258.00 (samples/s) -[2023-08-28 14:51:50,389][inference][INFO] - + Warming up the generation pass -[2023-08-28 14:51:50,977][inference][INFO] - + Tracking generation latency and throughput -[2023-08-28 14:51:56,110][inference][INFO] - + Generation pass latency: 5.13e-01 (s) -[2023-08-28 14:51:56,110][inference][INFO] - + Generation pass throughput: 195.00 (tokens/s) -[2023-08-28 14:51:56,110][inference][INFO] - Saving inference results -[2023-08-28 14:51:56,122][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-28_13:31:33_886b6be081e1bc28e8c6cbc93eba934f83677ab2/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-28_13:31:33_886b6be081e1bc28e8c6cbc93eba934f83677ab2/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_13:31:33_886b6be081e1bc28e8c6cbc93eba934f83677ab2/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-28_13:31:33_886b6be081e1bc28e8c6cbc93eba934f83677ab2/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-28_13:31:33_886b6be081e1bc28e8c6cbc93eba934f83677ab2/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index f9014df8a16c43ca6b51277412ff991289914f51..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_13:31:33_886b6be081e1bc28e8c6cbc93eba934f83677ab2/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-28_13:31:33_886b6be081e1bc28e8c6cbc93eba934f83677ab2/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-28_13:31:33_886b6be081e1bc28e8c6cbc93eba934f83677ab2/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-28_13:31:33_886b6be081e1bc28e8c6cbc93eba934f83677ab2/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_13:31:33_886b6be081e1bc28e8c6cbc93eba934f83677ab2/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-28_13:31:33_886b6be081e1bc28e8c6cbc93eba934f83677ab2/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-28_13:31:33_886b6be081e1bc28e8c6cbc93eba934f83677ab2/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_13:31:33_886b6be081e1bc28e8c6cbc93eba934f83677ab2/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-28_13:31:33_886b6be081e1bc28e8c6cbc93eba934f83677ab2/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-28_13:31:33_886b6be081e1bc28e8c6cbc93eba934f83677ab2/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 7d9cb3c2d34574efc3893d6f7f15d203f6ca8af1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_13:31:33_886b6be081e1bc28e8c6cbc93eba934f83677ab2/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.042304,0.0031,323.0 diff --git a/raw_results/2023-08-28_13:31:33_886b6be081e1bc28e8c6cbc93eba934f83677ab2/pytorch_bert_inference/0/main.log b/raw_results/2023-08-28_13:31:33_886b6be081e1bc28e8c6cbc93eba934f83677ab2/pytorch_bert_inference/0/main.log deleted file mode 100644 index 2bf9ea7bda7dbb7a625e7916c5ad47f33ff21f4a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_13:31:33_886b6be081e1bc28e8c6cbc93eba934f83677ab2/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-28 14:53:02,057][benchmark][INFO] - Configuring inference benchmark -[2023-08-28 14:53:02,058][benchmark][INFO] - + Setting seed(42) -[2023-08-28 14:53:03,293][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-28 14:53:03,293][backend][INFO] - Configuring pytorch backend -[2023-08-28 14:53:03,294][backend][INFO] - + Checking initial device isolation -[2023-08-28 14:53:03,294][backend][INFO] - + Checking contineous device isolation -[2023-08-28 14:53:03,294][pytorch][INFO] - + Disabling gradients -[2023-08-28 14:53:03,294][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-28 14:53:03,911][pytorch][INFO] - + Turning on eval mode -[2023-08-28 14:53:03,912][inference][INFO] - Running inference benchmark -[2023-08-28 14:53:04,031][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-28 14:53:04,032][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-28 14:53:04,096][inference][INFO] - + Forward pass peak memory: 467.042304 (MB) -[2023-08-28 14:53:04,097][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-28 14:53:04,099][inference][INFO] - + Warming up the forward pass -[2023-08-28 14:53:04,131][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-28 14:53:09,182][inference][INFO] - + Forward pass latency: 3.10e-03 (s) -[2023-08-28 14:53:09,184][inference][INFO] - + Forward pass throughput: 323.00 (samples/s) -[2023-08-28 14:53:09,184][inference][INFO] - Saving inference results -[2023-08-28 14:53:09,194][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-28_13:31:33_886b6be081e1bc28e8c6cbc93eba934f83677ab2/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-28_13:31:33_886b6be081e1bc28e8c6cbc93eba934f83677ab2/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_13:31:33_886b6be081e1bc28e8c6cbc93eba934f83677ab2/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-28_13:31:33_886b6be081e1bc28e8c6cbc93eba934f83677ab2/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-28_13:31:33_886b6be081e1bc28e8c6cbc93eba934f83677ab2/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 5d7b92c0186d3866a9c9a2b13b3629dd7798c465..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_13:31:33_886b6be081e1bc28e8c6cbc93eba934f83677ab2/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-28_13:31:33_886b6be081e1bc28e8c6cbc93eba934f83677ab2/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-28_13:31:33_886b6be081e1bc28e8c6cbc93eba934f83677ab2/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-08-28_13:31:33_886b6be081e1bc28e8c6cbc93eba934f83677ab2/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_13:31:33_886b6be081e1bc28e8c6cbc93eba934f83677ab2/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-28_13:31:33_886b6be081e1bc28e8c6cbc93eba934f83677ab2/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-28_13:31:33_886b6be081e1bc28e8c6cbc93eba934f83677ab2/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_13:31:33_886b6be081e1bc28e8c6cbc93eba934f83677ab2/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-28_13:31:33_886b6be081e1bc28e8c6cbc93eba934f83677ab2/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-28_13:31:33_886b6be081e1bc28e8c6cbc93eba934f83677ab2/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 72182c88192d759a6d223f4d1a40dad7be7eba3e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_13:31:33_886b6be081e1bc28e8c6cbc93eba934f83677ab2/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.0704,0.00346,1160.0 diff --git a/raw_results/2023-08-28_13:31:33_886b6be081e1bc28e8c6cbc93eba934f83677ab2/pytorch_bert_inference/1/main.log b/raw_results/2023-08-28_13:31:33_886b6be081e1bc28e8c6cbc93eba934f83677ab2/pytorch_bert_inference/1/main.log deleted file mode 100644 index 47eeebd5053e96135e6b334d3f8e2e504b73377c..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-28_13:31:33_886b6be081e1bc28e8c6cbc93eba934f83677ab2/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-28 14:53:09,585][benchmark][INFO] - Configuring inference benchmark -[2023-08-28 14:53:09,586][benchmark][INFO] - + Setting seed(42) -[2023-08-28 14:53:10,027][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-28 14:53:10,028][backend][INFO] - Configuring pytorch backend -[2023-08-28 14:53:10,028][backend][INFO] - + Checking initial device isolation -[2023-08-28 14:53:10,028][backend][INFO] - + Checking contineous device isolation -[2023-08-28 14:53:10,028][pytorch][INFO] - + Disabling gradients -[2023-08-28 14:53:10,028][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-28 14:53:10,146][pytorch][INFO] - + Turning on eval mode -[2023-08-28 14:53:10,147][inference][INFO] - Running inference benchmark -[2023-08-28 14:53:10,276][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-28 14:53:10,277][inference][INFO] - + Tracking forward pass peak memory -[2023-08-28 14:53:10,320][inference][INFO] - + Forward pass peak memory: 468.0704 (MB) -[2023-08-28 14:53:10,321][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-28 14:53:10,323][inference][INFO] - + Warming up the forward pass -[2023-08-28 14:53:10,379][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-28 14:53:15,427][inference][INFO] - + Forward pass latency: 3.46e-03 (s) -[2023-08-28 14:53:15,428][inference][INFO] - + Forward pass throughput: 1160.00 (samples/s) -[2023-08-28 14:53:15,428][inference][INFO] - Saving inference results -[2023-08-28 14:53:15,437][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-28_13:31:33_886b6be081e1bc28e8c6cbc93eba934f83677ab2/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-28_13:31:33_886b6be081e1bc28e8c6cbc93eba934f83677ab2/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_13:31:33_886b6be081e1bc28e8c6cbc93eba934f83677ab2/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: 
hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-28_13:31:33_886b6be081e1bc28e8c6cbc93eba934f83677ab2/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-28_13:31:33_886b6be081e1bc28e8c6cbc93eba934f83677ab2/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 9ef0555a8e194e7d897eecbd2da4acac4984049d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_13:31:33_886b6be081e1bc28e8c6cbc93eba934f83677ab2/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-28_13:31:33_886b6be081e1bc28e8c6cbc93eba934f83677ab2/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-28_13:31:33_886b6be081e1bc28e8c6cbc93eba934f83677ab2/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-28_13:31:33_886b6be081e1bc28e8c6cbc93eba934f83677ab2/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_13:31:33_886b6be081e1bc28e8c6cbc93eba934f83677ab2/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-28_13:31:33_886b6be081e1bc28e8c6cbc93eba934f83677ab2/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-28_13:31:33_886b6be081e1bc28e8c6cbc93eba934f83677ab2/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_13:31:33_886b6be081e1bc28e8c6cbc93eba934f83677ab2/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-28_13:31:33_886b6be081e1bc28e8c6cbc93eba934f83677ab2/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-28_13:31:33_886b6be081e1bc28e8c6cbc93eba934f83677ab2/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index c2262ab9d67819167e939fbcfa91edf5345e39bd..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_13:31:33_886b6be081e1bc28e8c6cbc93eba934f83677ab2/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.635072,0.00309,324.0,0.479,209.0 diff --git a/raw_results/2023-08-28_13:31:33_886b6be081e1bc28e8c6cbc93eba934f83677ab2/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-28_13:31:33_886b6be081e1bc28e8c6cbc93eba934f83677ab2/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 173fc330a211dff3e1d9c0c6fac94de6fa0ecb29..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_13:31:33_886b6be081e1bc28e8c6cbc93eba934f83677ab2/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-28 14:53:20,270][benchmark][INFO] - Configuring inference benchmark -[2023-08-28 14:53:20,271][benchmark][INFO] - + Setting seed(42) -[2023-08-28 14:53:21,708][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-28 14:53:21,709][backend][INFO] - Configuring pytorch backend -[2023-08-28 14:53:21,709][backend][INFO] - + Checking initial device isolation -[2023-08-28 14:53:21,709][backend][INFO] - + Checking contineous device isolation -[2023-08-28 14:53:21,709][pytorch][INFO] - + Disabling gradients -[2023-08-28 14:53:21,709][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-28 14:53:22,368][pytorch][INFO] - + Turning on eval mode -[2023-08-28 14:53:22,368][inference][INFO] - Running inference benchmark -[2023-08-28 14:53:22,569][inference][INFO] - + Tracking forward pass peak memory -[2023-08-28 14:53:22,614][inference][INFO] - + Forward pass peak memory: 469.635072 (MB) -[2023-08-28 14:53:22,616][inference][INFO] - + Warming up the forward pass 
-[2023-08-28 14:53:22,646][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-28 14:53:27,701][inference][INFO] - + Forward pass latency: 3.09e-03 (s) -[2023-08-28 14:53:27,703][inference][INFO] - + Forward pass throughput: 324.00 (samples/s) -[2023-08-28 14:53:27,704][inference][INFO] - + Warming up the generation pass -[2023-08-28 14:53:28,199][inference][INFO] - + Tracking generation latency and throughput -[2023-08-28 14:53:33,470][inference][INFO] - + Generation pass latency: 4.79e-01 (s) -[2023-08-28 14:53:33,470][inference][INFO] - + Generation pass throughput: 209.00 (tokens/s) -[2023-08-28 14:53:33,470][inference][INFO] - Saving inference results -[2023-08-28 14:53:33,485][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-28_14:37:37_50573c648ae953dcc1b94d663651f07fb02268f4/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-28_14:37:37_50573c648ae953dcc1b94d663651f07fb02268f4/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_14:37:37_50573c648ae953dcc1b94d663651f07fb02268f4/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-28_14:37:37_50573c648ae953dcc1b94d663651f07fb02268f4/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-28_14:37:37_50573c648ae953dcc1b94d663651f07fb02268f4/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index a9495722d55baca14f3cea64e881319a4159b0ce..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_14:37:37_50573c648ae953dcc1b94d663651f07fb02268f4/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-28_14:37:37_50573c648ae953dcc1b94d663651f07fb02268f4/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-28_14:37:37_50573c648ae953dcc1b94d663651f07fb02268f4/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-28_14:37:37_50573c648ae953dcc1b94d663651f07fb02268f4/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_14:37:37_50573c648ae953dcc1b94d663651f07fb02268f4/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-28_14:37:37_50573c648ae953dcc1b94d663651f07fb02268f4/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-28_14:37:37_50573c648ae953dcc1b94d663651f07fb02268f4/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_14:37:37_50573c648ae953dcc1b94d663651f07fb02268f4/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-28_14:37:37_50573c648ae953dcc1b94d663651f07fb02268f4/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-28_14:37:37_50573c648ae953dcc1b94d663651f07fb02268f4/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index dd85fc8c75c2346fe4b2475bb1d833e8817e7d06..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_14:37:37_50573c648ae953dcc1b94d663651f07fb02268f4/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.36646399999995,0.00355,282.0 diff --git a/raw_results/2023-08-28_14:37:37_50573c648ae953dcc1b94d663651f07fb02268f4/pytorch_bert_inference/0/main.log b/raw_results/2023-08-28_14:37:37_50573c648ae953dcc1b94d663651f07fb02268f4/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
7e46ec7da5d6f625f60a36190b23eb80af12ea54..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_14:37:37_50573c648ae953dcc1b94d663651f07fb02268f4/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-28 14:54:39,161][benchmark][INFO] - Configuring inference benchmark -[2023-08-28 14:54:39,162][benchmark][INFO] - + Setting seed(42) -[2023-08-28 14:54:40,699][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-28 14:54:40,699][backend][INFO] - Configuring pytorch backend -[2023-08-28 14:54:40,700][backend][INFO] - + Checking initial device isolation -[2023-08-28 14:54:40,700][backend][INFO] - + Checking contineous device isolation -[2023-08-28 14:54:40,700][pytorch][INFO] - + Disabling gradients -[2023-08-28 14:54:40,700][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-28 14:54:41,328][pytorch][INFO] - + Turning on eval mode -[2023-08-28 14:54:41,328][inference][INFO] - Running inference benchmark -[2023-08-28 14:54:41,448][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-28 14:54:41,450][inference][INFO] - + Tracking forward pass peak memory -[2023-08-28 14:54:41,509][inference][INFO] - + Forward pass peak memory: 466.36646399999995 (MB) -[2023-08-28 14:54:41,510][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-28 14:54:41,512][inference][INFO] - + Warming up the forward pass -[2023-08-28 14:54:41,549][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-28 14:54:46,596][inference][INFO] - + Forward pass latency: 3.55e-03 (s) -[2023-08-28 14:54:46,597][inference][INFO] - + Forward pass throughput: 282.00 (samples/s) -[2023-08-28 14:54:46,597][inference][INFO] - Saving inference results -[2023-08-28 14:54:46,608][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-28_14:37:37_50573c648ae953dcc1b94d663651f07fb02268f4/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-28_14:37:37_50573c648ae953dcc1b94d663651f07fb02268f4/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_14:37:37_50573c648ae953dcc1b94d663651f07fb02268f4/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-28_14:37:37_50573c648ae953dcc1b94d663651f07fb02268f4/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-28_14:37:37_50573c648ae953dcc1b94d663651f07fb02268f4/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index a68143b0a53f13bcc7619539725637aed0fce42d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_14:37:37_50573c648ae953dcc1b94d663651f07fb02268f4/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-28_14:37:37_50573c648ae953dcc1b94d663651f07fb02268f4/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-28_14:37:37_50573c648ae953dcc1b94d663651f07fb02268f4/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-28_14:37:37_50573c648ae953dcc1b94d663651f07fb02268f4/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_14:37:37_50573c648ae953dcc1b94d663651f07fb02268f4/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-28_14:37:37_50573c648ae953dcc1b94d663651f07fb02268f4/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-28_14:37:37_50573c648ae953dcc1b94d663651f07fb02268f4/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_14:37:37_50573c648ae953dcc1b94d663651f07fb02268f4/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-28_14:37:37_50573c648ae953dcc1b94d663651f07fb02268f4/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-28_14:37:37_50573c648ae953dcc1b94d663651f07fb02268f4/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 3cebea319aca8a9455d1c205478ba10c9dd7d1d0..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_14:37:37_50573c648ae953dcc1b94d663651f07fb02268f4/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.304448,0.00394,1020.0 diff --git a/raw_results/2023-08-28_14:37:37_50573c648ae953dcc1b94d663651f07fb02268f4/pytorch_bert_inference/1/main.log b/raw_results/2023-08-28_14:37:37_50573c648ae953dcc1b94d663651f07fb02268f4/pytorch_bert_inference/1/main.log deleted file mode 100644 index f49e3b728eedfbf5ec042c8489ec51cef5ce97ac..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_14:37:37_50573c648ae953dcc1b94d663651f07fb02268f4/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-28 14:54:46,980][benchmark][INFO] - Configuring inference benchmark -[2023-08-28 14:54:46,981][benchmark][INFO] - + Setting seed(42) -[2023-08-28 14:54:47,439][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-28 14:54:47,439][backend][INFO] - Configuring pytorch backend -[2023-08-28 14:54:47,439][backend][INFO] - + Checking initial device isolation -[2023-08-28 14:54:47,439][backend][INFO] - + Checking contineous device isolation -[2023-08-28 14:54:47,439][pytorch][INFO] - + Disabling gradients -[2023-08-28 14:54:47,439][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-28 14:54:47,605][pytorch][INFO] - + Turning on eval mode -[2023-08-28 14:54:47,605][inference][INFO] - Running inference benchmark -[2023-08-28 14:54:47,776][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-28 14:54:47,778][inference][INFO] - + Tracking forward pass 
peak memory -[2023-08-28 14:54:47,819][inference][INFO] - + Forward pass peak memory: 467.304448 (MB) -[2023-08-28 14:54:47,820][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-28 14:54:47,821][inference][INFO] - + Warming up the forward pass -[2023-08-28 14:54:47,871][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-28 14:54:52,914][inference][INFO] - + Forward pass latency: 3.94e-03 (s) -[2023-08-28 14:54:52,915][inference][INFO] - + Forward pass throughput: 1020.00 (samples/s) -[2023-08-28 14:54:52,915][inference][INFO] - Saving inference results -[2023-08-28 14:54:52,922][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-28_14:37:37_50573c648ae953dcc1b94d663651f07fb02268f4/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-28_14:37:37_50573c648ae953dcc1b94d663651f07fb02268f4/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_14:37:37_50573c648ae953dcc1b94d663651f07fb02268f4/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-28_14:37:37_50573c648ae953dcc1b94d663651f07fb02268f4/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-28_14:37:37_50573c648ae953dcc1b94d663651f07fb02268f4/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 93dbc3b2915de602dcad868cbb8a349715831a0b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_14:37:37_50573c648ae953dcc1b94d663651f07fb02268f4/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-28_14:37:37_50573c648ae953dcc1b94d663651f07fb02268f4/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-28_14:37:37_50573c648ae953dcc1b94d663651f07fb02268f4/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-28_14:37:37_50573c648ae953dcc1b94d663651f07fb02268f4/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_14:37:37_50573c648ae953dcc1b94d663651f07fb02268f4/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-28_14:37:37_50573c648ae953dcc1b94d663651f07fb02268f4/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-28_14:37:37_50573c648ae953dcc1b94d663651f07fb02268f4/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_14:37:37_50573c648ae953dcc1b94d663651f07fb02268f4/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-28_14:37:37_50573c648ae953dcc1b94d663651f07fb02268f4/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-28_14:37:37_50573c648ae953dcc1b94d663651f07fb02268f4/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 3297bbb399d14821703d39555375901d36074ed9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-28_14:37:37_50573c648ae953dcc1b94d663651f07fb02268f4/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.573632,0.00379,264.0,0.533,188.0 diff --git a/raw_results/2023-08-28_14:37:37_50573c648ae953dcc1b94d663651f07fb02268f4/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-28_14:37:37_50573c648ae953dcc1b94d663651f07fb02268f4/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 451e91b9ea39f4761e90bcb7b74db0e0414a2a98..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-28_14:37:37_50573c648ae953dcc1b94d663651f07fb02268f4/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-28 14:54:57,661][benchmark][INFO] - Configuring inference benchmark -[2023-08-28 14:54:57,661][benchmark][INFO] - + Setting seed(42) -[2023-08-28 14:54:59,073][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-28 14:54:59,074][backend][INFO] - Configuring pytorch backend -[2023-08-28 14:54:59,074][backend][INFO] - + Checking initial device isolation -[2023-08-28 14:54:59,074][backend][INFO] - + Checking contineous device isolation -[2023-08-28 14:54:59,074][pytorch][INFO] - + Disabling gradients -[2023-08-28 14:54:59,074][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-28 14:54:59,731][pytorch][INFO] - + Turning on eval mode -[2023-08-28 14:54:59,732][inference][INFO] - Running inference benchmark -[2023-08-28 14:54:59,936][inference][INFO] - + Tracking forward pass peak memory -[2023-08-28 14:54:59,984][inference][INFO] - + Forward pass peak memory: 469.573632 (MB) -[2023-08-28 14:54:59,986][inference][INFO] - + Warming up the forward pass -[2023-08-28 14:55:00,021][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-28 14:55:05,068][inference][INFO] - + Forward pass latency: 3.79e-03 (s) -[2023-08-28 14:55:05,070][inference][INFO] - + Forward pass throughput: 264.00 (samples/s) -[2023-08-28 14:55:05,071][inference][INFO] - + Warming up the generation pass -[2023-08-28 14:55:05,660][inference][INFO] - + Tracking generation latency and throughput -[2023-08-28 14:55:10,991][inference][INFO] - + Generation pass latency: 5.33e-01 (s) -[2023-08-28 14:55:10,993][inference][INFO] - + Generation pass throughput: 188.00 (tokens/s) -[2023-08-28 14:55:10,993][inference][INFO] - Saving inference results -[2023-08-28 14:55:11,004][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_07:22:14_738ecd17d869577d263eb1fba3fee0ab8ec5b5a2/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-29_07:22:14_738ecd17d869577d263eb1fba3fee0ab8ec5b5a2/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_07:22:14_738ecd17d869577d263eb1fba3fee0ab8ec5b5a2/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_07:22:14_738ecd17d869577d263eb1fba3fee0ab8ec5b5a2/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-29_07:22:14_738ecd17d869577d263eb1fba3fee0ab8ec5b5a2/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 5c2e274d60094995a53b8c023fdc2b84c190f144..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_07:22:14_738ecd17d869577d263eb1fba3fee0ab8ec5b5a2/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_07:22:14_738ecd17d869577d263eb1fba3fee0ab8ec5b5a2/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-29_07:22:14_738ecd17d869577d263eb1fba3fee0ab8ec5b5a2/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-29_07:22:14_738ecd17d869577d263eb1fba3fee0ab8ec5b5a2/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_07:22:14_738ecd17d869577d263eb1fba3fee0ab8ec5b5a2/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-29_07:22:14_738ecd17d869577d263eb1fba3fee0ab8ec5b5a2/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-29_07:22:14_738ecd17d869577d263eb1fba3fee0ab8ec5b5a2/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_07:22:14_738ecd17d869577d263eb1fba3fee0ab8ec5b5a2/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_07:22:14_738ecd17d869577d263eb1fba3fee0ab8ec5b5a2/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-29_07:22:14_738ecd17d869577d263eb1fba3fee0ab8ec5b5a2/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 3f17be6d925f376d69c1f48a2e656478b0ba105e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_07:22:14_738ecd17d869577d263eb1fba3fee0ab8ec5b5a2/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.202048,0.00401,249.0 diff --git a/raw_results/2023-08-29_07:22:14_738ecd17d869577d263eb1fba3fee0ab8ec5b5a2/pytorch_bert_inference/0/main.log b/raw_results/2023-08-29_07:22:14_738ecd17d869577d263eb1fba3fee0ab8ec5b5a2/pytorch_bert_inference/0/main.log deleted file mode 100644 index 3fd4492589608bc6b311cc1304ee527971302957..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_07:22:14_738ecd17d869577d263eb1fba3fee0ab8ec5b5a2/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-29 08:50:10,227][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 08:50:10,228][benchmark][INFO] - + Setting seed(42) -[2023-08-29 08:50:11,989][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-29 08:50:11,989][backend][INFO] - Configuring pytorch backend -[2023-08-29 08:50:11,990][backend][INFO] - + Checking initial device isolation -[2023-08-29 08:50:11,990][backend][INFO] - + Checking contineous device isolation -[2023-08-29 08:50:11,990][pytorch][INFO] - + Disabling gradients -[2023-08-29 08:50:11,990][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 08:50:12,601][pytorch][INFO] - + Turning on eval mode -[2023-08-29 08:50:12,601][inference][INFO] - Running inference benchmark -[2023-08-29 08:50:12,724][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 08:50:12,725][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-29 08:50:12,786][inference][INFO] - + Forward pass peak memory: 467.202048 (MB) -[2023-08-29 08:50:12,787][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 08:50:12,789][inference][INFO] - + Warming up the forward pass -[2023-08-29 08:50:12,833][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 08:50:17,876][inference][INFO] - + Forward pass latency: 4.01e-03 (s) -[2023-08-29 08:50:17,877][inference][INFO] - + Forward pass throughput: 249.00 (samples/s) -[2023-08-29 08:50:17,877][inference][INFO] - Saving inference results -[2023-08-29 08:50:17,887][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_07:22:14_738ecd17d869577d263eb1fba3fee0ab8ec5b5a2/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-29_07:22:14_738ecd17d869577d263eb1fba3fee0ab8ec5b5a2/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_07:22:14_738ecd17d869577d263eb1fba3fee0ab8ec5b5a2/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_07:22:14_738ecd17d869577d263eb1fba3fee0ab8ec5b5a2/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-29_07:22:14_738ecd17d869577d263eb1fba3fee0ab8ec5b5a2/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index e89987c0cd422386ff11f6503d9054424a5a04d0..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_07:22:14_738ecd17d869577d263eb1fba3fee0ab8ec5b5a2/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_07:22:14_738ecd17d869577d263eb1fba3fee0ab8ec5b5a2/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-29_07:22:14_738ecd17d869577d263eb1fba3fee0ab8ec5b5a2/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-08-29_07:22:14_738ecd17d869577d263eb1fba3fee0ab8ec5b5a2/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_07:22:14_738ecd17d869577d263eb1fba3fee0ab8ec5b5a2/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-29_07:22:14_738ecd17d869577d263eb1fba3fee0ab8ec5b5a2/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-29_07:22:14_738ecd17d869577d263eb1fba3fee0ab8ec5b5a2/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_07:22:14_738ecd17d869577d263eb1fba3fee0ab8ec5b5a2/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_07:22:14_738ecd17d869577d263eb1fba3fee0ab8ec5b5a2/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-29_07:22:14_738ecd17d869577d263eb1fba3fee0ab8ec5b5a2/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 59763254b5491269476248ebb49052dbb0d5a015..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_07:22:14_738ecd17d869577d263eb1fba3fee0ab8ec5b5a2/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.14412799999997,0.00445,899.0 diff --git a/raw_results/2023-08-29_07:22:14_738ecd17d869577d263eb1fba3fee0ab8ec5b5a2/pytorch_bert_inference/1/main.log b/raw_results/2023-08-29_07:22:14_738ecd17d869577d263eb1fba3fee0ab8ec5b5a2/pytorch_bert_inference/1/main.log deleted file mode 100644 index b40900ac6d90addbd3c44778405983fa18b9e309..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-29_07:22:14_738ecd17d869577d263eb1fba3fee0ab8ec5b5a2/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-29 08:50:18,262][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 08:50:18,263][benchmark][INFO] - + Setting seed(42) -[2023-08-29 08:50:18,742][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-29 08:50:18,743][backend][INFO] - Configuring pytorch backend -[2023-08-29 08:50:18,743][backend][INFO] - + Checking initial device isolation -[2023-08-29 08:50:18,743][backend][INFO] - + Checking contineous device isolation -[2023-08-29 08:50:18,743][pytorch][INFO] - + Disabling gradients -[2023-08-29 08:50:18,744][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 08:50:18,868][pytorch][INFO] - + Turning on eval mode -[2023-08-29 08:50:18,869][inference][INFO] - Running inference benchmark -[2023-08-29 08:50:19,000][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 08:50:19,001][inference][INFO] - + Tracking forward pass peak memory -[2023-08-29 08:50:19,043][inference][INFO] - + Forward pass peak memory: 468.14412799999997 (MB) -[2023-08-29 08:50:19,044][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 08:50:19,045][inference][INFO] - + Warming up the forward pass -[2023-08-29 08:50:19,092][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 08:50:24,130][inference][INFO] - + Forward pass latency: 4.45e-03 (s) -[2023-08-29 08:50:24,131][inference][INFO] - + Forward pass throughput: 899.00 (samples/s) -[2023-08-29 08:50:24,131][inference][INFO] - Saving inference results -[2023-08-29 08:50:24,140][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_07:22:14_738ecd17d869577d263eb1fba3fee0ab8ec5b5a2/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-29_07:22:14_738ecd17d869577d263eb1fba3fee0ab8ec5b5a2/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_07:22:14_738ecd17d869577d263eb1fba3fee0ab8ec5b5a2/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference 
-model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_07:22:14_738ecd17d869577d263eb1fba3fee0ab8ec5b5a2/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-29_07:22:14_738ecd17d869577d263eb1fba3fee0ab8ec5b5a2/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index aae0255edb63a715ec1ec8dda1b947c4daf26719..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_07:22:14_738ecd17d869577d263eb1fba3fee0ab8ec5b5a2/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_07:22:14_738ecd17d869577d263eb1fba3fee0ab8ec5b5a2/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-29_07:22:14_738ecd17d869577d263eb1fba3fee0ab8ec5b5a2/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-29_07:22:14_738ecd17d869577d263eb1fba3fee0ab8ec5b5a2/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_07:22:14_738ecd17d869577d263eb1fba3fee0ab8ec5b5a2/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-29_07:22:14_738ecd17d869577d263eb1fba3fee0ab8ec5b5a2/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-29_07:22:14_738ecd17d869577d263eb1fba3fee0ab8ec5b5a2/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_07:22:14_738ecd17d869577d263eb1fba3fee0ab8ec5b5a2/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_07:22:14_738ecd17d869577d263eb1fba3fee0ab8ec5b5a2/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-29_07:22:14_738ecd17d869577d263eb1fba3fee0ab8ec5b5a2/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index cf4d1ee54e00ba3f8b56c92f74d755b003b170dd..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_07:22:14_738ecd17d869577d263eb1fba3fee0ab8ec5b5a2/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.38521599999996,0.00371,270.0,0.49,204.0 diff --git a/raw_results/2023-08-29_07:22:14_738ecd17d869577d263eb1fba3fee0ab8ec5b5a2/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-29_07:22:14_738ecd17d869577d263eb1fba3fee0ab8ec5b5a2/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index a171699bd591da9674efd680e80ec3ff2a0d257e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_07:22:14_738ecd17d869577d263eb1fba3fee0ab8ec5b5a2/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-29 08:50:29,033][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 08:50:29,033][benchmark][INFO] - + Setting seed(42) -[2023-08-29 08:50:30,508][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-29 08:50:30,508][backend][INFO] - Configuring pytorch backend -[2023-08-29 08:50:30,508][backend][INFO] - + Checking initial device isolation -[2023-08-29 08:50:30,509][backend][INFO] - + Checking contineous device isolation -[2023-08-29 08:50:30,509][pytorch][INFO] - + Disabling gradients -[2023-08-29 08:50:30,509][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 08:50:31,187][pytorch][INFO] - + Turning on eval mode -[2023-08-29 08:50:31,187][inference][INFO] - Running inference benchmark -[2023-08-29 08:50:31,394][inference][INFO] - + Tracking forward pass peak memory -[2023-08-29 08:50:31,442][inference][INFO] - + Forward pass peak memory: 469.38521599999996 (MB) -[2023-08-29 08:50:31,444][inference][INFO] - + Warming up the 
forward pass -[2023-08-29 08:50:31,487][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 08:50:36,531][inference][INFO] - + Forward pass latency: 3.71e-03 (s) -[2023-08-29 08:50:36,532][inference][INFO] - + Forward pass throughput: 270.00 (samples/s) -[2023-08-29 08:50:36,533][inference][INFO] - + Warming up the generation pass -[2023-08-29 08:50:37,121][inference][INFO] - + Tracking generation latency and throughput -[2023-08-29 08:50:42,512][inference][INFO] - + Generation pass latency: 4.90e-01 (s) -[2023-08-29 08:50:42,514][inference][INFO] - + Generation pass throughput: 204.00 (tokens/s) -[2023-08-29 08:50:42,514][inference][INFO] - Saving inference results -[2023-08-29 08:50:42,525][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_07:22:37_39c37fe45c12bc2f936313330fe5c82319adb6e3/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-29_07:22:37_39c37fe45c12bc2f936313330fe5c82319adb6e3/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_07:22:37_39c37fe45c12bc2f936313330fe5c82319adb6e3/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_07:22:37_39c37fe45c12bc2f936313330fe5c82319adb6e3/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-29_07:22:37_39c37fe45c12bc2f936313330fe5c82319adb6e3/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 7f0d6067368c3b50d5bd6ebd3e7bdbc391b9cd44..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_07:22:37_39c37fe45c12bc2f936313330fe5c82319adb6e3/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_07:22:37_39c37fe45c12bc2f936313330fe5c82319adb6e3/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-29_07:22:37_39c37fe45c12bc2f936313330fe5c82319adb6e3/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-29_07:22:37_39c37fe45c12bc2f936313330fe5c82319adb6e3/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_07:22:37_39c37fe45c12bc2f936313330fe5c82319adb6e3/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-29_07:22:37_39c37fe45c12bc2f936313330fe5c82319adb6e3/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-29_07:22:37_39c37fe45c12bc2f936313330fe5c82319adb6e3/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_07:22:37_39c37fe45c12bc2f936313330fe5c82319adb6e3/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_07:22:37_39c37fe45c12bc2f936313330fe5c82319adb6e3/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-29_07:22:37_39c37fe45c12bc2f936313330fe5c82319adb6e3/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 30e138d4b1b852e5456c29fc733cc4b4a04cbb06..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_07:22:37_39c37fe45c12bc2f936313330fe5c82319adb6e3/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.492864,0.00377,265.0 diff --git a/raw_results/2023-08-29_07:22:37_39c37fe45c12bc2f936313330fe5c82319adb6e3/pytorch_bert_inference/0/main.log b/raw_results/2023-08-29_07:22:37_39c37fe45c12bc2f936313330fe5c82319adb6e3/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
b75d4f25cc834a95661c1849678cd7234509c9b6..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_07:22:37_39c37fe45c12bc2f936313330fe5c82319adb6e3/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-29 08:51:51,890][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 08:51:51,891][benchmark][INFO] - + Setting seed(42) -[2023-08-29 08:51:53,270][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-29 08:51:53,270][backend][INFO] - Configuring pytorch backend -[2023-08-29 08:51:53,270][backend][INFO] - + Checking initial device isolation -[2023-08-29 08:51:53,271][backend][INFO] - + Checking contineous device isolation -[2023-08-29 08:51:53,271][pytorch][INFO] - + Disabling gradients -[2023-08-29 08:51:53,271][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 08:51:53,892][pytorch][INFO] - + Turning on eval mode -[2023-08-29 08:51:53,893][inference][INFO] - Running inference benchmark -[2023-08-29 08:51:54,016][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 08:51:54,018][inference][INFO] - + Tracking forward pass peak memory -[2023-08-29 08:51:54,079][inference][INFO] - + Forward pass peak memory: 467.492864 (MB) -[2023-08-29 08:51:54,080][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 08:51:54,082][inference][INFO] - + Warming up the forward pass -[2023-08-29 08:51:54,119][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 08:51:59,166][inference][INFO] - + Forward pass latency: 3.77e-03 (s) -[2023-08-29 08:51:59,167][inference][INFO] - + Forward pass throughput: 265.00 (samples/s) -[2023-08-29 08:51:59,168][inference][INFO] - Saving inference results -[2023-08-29 08:51:59,178][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_07:22:37_39c37fe45c12bc2f936313330fe5c82319adb6e3/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-29_07:22:37_39c37fe45c12bc2f936313330fe5c82319adb6e3/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_07:22:37_39c37fe45c12bc2f936313330fe5c82319adb6e3/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_07:22:37_39c37fe45c12bc2f936313330fe5c82319adb6e3/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-29_07:22:37_39c37fe45c12bc2f936313330fe5c82319adb6e3/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 1d5886eff80f90c7a75c3fef084b7810923e1ab0..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_07:22:37_39c37fe45c12bc2f936313330fe5c82319adb6e3/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
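A note on the forward-pass numbers in these inference_results.csv hunks: the reported throughput is consistent with batch_size divided by the measured forward latency, rounded to roughly three significant figures (1 / 0.00377 s ≈ 265 samples/s for the batch_size=1 BERT job above; the batch_size=4 job a little further down follows the same pattern). The snippet below is an illustrative sanity check of that relationship, not code from the benchmark tooling itself:

# Illustrative sanity check: forward throughput ~= batch_size / latency.
# The (batch_size, latency, reported) triples are copied from the
# inference_results.csv hunks for pytorch_bert_inference jobs 0 and 1.
runs = [
    (1, 0.00377, 265.0),  # job 0, batch_size=1
    (4, 0.00425, 941.0),  # job 1, batch_size=4 (CSV appears below)
]
for batch_size, latency_s, reported in runs:
    derived = batch_size / latency_s
    print(f"batch_size={batch_size}: derived={derived:.1f}, reported={reported}")
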
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_07:22:37_39c37fe45c12bc2f936313330fe5c82319adb6e3/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-29_07:22:37_39c37fe45c12bc2f936313330fe5c82319adb6e3/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-29_07:22:37_39c37fe45c12bc2f936313330fe5c82319adb6e3/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_07:22:37_39c37fe45c12bc2f936313330fe5c82319adb6e3/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-29_07:22:37_39c37fe45c12bc2f936313330fe5c82319adb6e3/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-29_07:22:37_39c37fe45c12bc2f936313330fe5c82319adb6e3/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_07:22:37_39c37fe45c12bc2f936313330fe5c82319adb6e3/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_07:22:37_39c37fe45c12bc2f936313330fe5c82319adb6e3/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-29_07:22:37_39c37fe45c12bc2f936313330fe5c82319adb6e3/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 749371f6b8614d78d4d028367bf081d15884d9a0..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_07:22:37_39c37fe45c12bc2f936313330fe5c82319adb6e3/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.45952,0.00425,941.0 diff --git a/raw_results/2023-08-29_07:22:37_39c37fe45c12bc2f936313330fe5c82319adb6e3/pytorch_bert_inference/1/main.log b/raw_results/2023-08-29_07:22:37_39c37fe45c12bc2f936313330fe5c82319adb6e3/pytorch_bert_inference/1/main.log deleted file mode 100644 index 4358026503c021fe5883b37308bcb635b7922f7c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_07:22:37_39c37fe45c12bc2f936313330fe5c82319adb6e3/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-29 08:51:59,551][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 08:51:59,552][benchmark][INFO] - + Setting seed(42) -[2023-08-29 08:51:59,983][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-29 08:51:59,983][backend][INFO] - Configuring pytorch backend -[2023-08-29 08:51:59,983][backend][INFO] - + Checking initial device isolation -[2023-08-29 08:51:59,984][backend][INFO] - + Checking contineous device isolation -[2023-08-29 08:51:59,984][pytorch][INFO] - + Disabling gradients -[2023-08-29 08:51:59,984][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 08:52:00,105][pytorch][INFO] - + Turning on eval mode -[2023-08-29 08:52:00,106][inference][INFO] - Running inference benchmark -[2023-08-29 08:52:00,231][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 08:52:00,233][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-29 08:52:00,274][inference][INFO] - + Forward pass peak memory: 468.45952 (MB) -[2023-08-29 08:52:00,275][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 08:52:00,277][inference][INFO] - + Warming up the forward pass -[2023-08-29 08:52:00,321][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 08:52:05,362][inference][INFO] - + Forward pass latency: 4.25e-03 (s) -[2023-08-29 08:52:05,363][inference][INFO] - + Forward pass throughput: 941.00 (samples/s) -[2023-08-29 08:52:05,363][inference][INFO] - Saving inference results -[2023-08-29 08:52:05,371][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_07:22:37_39c37fe45c12bc2f936313330fe5c82319adb6e3/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-29_07:22:37_39c37fe45c12bc2f936313330fe5c82319adb6e3/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_07:22:37_39c37fe45c12bc2f936313330fe5c82319adb6e3/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_07:22:37_39c37fe45c12bc2f936313330fe5c82319adb6e3/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-29_07:22:37_39c37fe45c12bc2f936313330fe5c82319adb6e3/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 171fcfbd898d45bcda41af29cefaa1982a54401e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_07:22:37_39c37fe45c12bc2f936313330fe5c82319adb6e3/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_07:22:37_39c37fe45c12bc2f936313330fe5c82319adb6e3/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-29_07:22:37_39c37fe45c12bc2f936313330fe5c82319adb6e3/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-29_07:22:37_39c37fe45c12bc2f936313330fe5c82319adb6e3/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_07:22:37_39c37fe45c12bc2f936313330fe5c82319adb6e3/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-29_07:22:37_39c37fe45c12bc2f936313330fe5c82319adb6e3/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-29_07:22:37_39c37fe45c12bc2f936313330fe5c82319adb6e3/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_07:22:37_39c37fe45c12bc2f936313330fe5c82319adb6e3/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_07:22:37_39c37fe45c12bc2f936313330fe5c82319adb6e3/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-29_07:22:37_39c37fe45c12bc2f936313330fe5c82319adb6e3/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 53374d5d3286c7890e8bb27115a8cbb4c9c0dce1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_07:22:37_39c37fe45c12bc2f936313330fe5c82319adb6e3/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.344256,0.00316,316.0,0.48,208.0 diff --git a/raw_results/2023-08-29_07:22:37_39c37fe45c12bc2f936313330fe5c82319adb6e3/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-29_07:22:37_39c37fe45c12bc2f936313330fe5c82319adb6e3/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 5033d61382c587ed3c7782060d7bd82025cc5f50..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-29_07:22:37_39c37fe45c12bc2f936313330fe5c82319adb6e3/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-29 08:52:10,146][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 08:52:10,148][benchmark][INFO] - + Setting seed(42) -[2023-08-29 08:52:11,718][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-29 08:52:11,719][backend][INFO] - Configuring pytorch backend -[2023-08-29 08:52:11,719][backend][INFO] - + Checking initial device isolation -[2023-08-29 08:52:11,719][backend][INFO] - + Checking contineous device isolation -[2023-08-29 08:52:11,720][pytorch][INFO] - + Disabling gradients -[2023-08-29 08:52:11,720][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 08:52:12,381][pytorch][INFO] - + Turning on eval mode -[2023-08-29 08:52:12,382][inference][INFO] - Running inference benchmark -[2023-08-29 08:52:12,582][inference][INFO] - + Tracking forward pass peak memory -[2023-08-29 08:52:12,626][inference][INFO] - + Forward pass peak memory: 469.344256 (MB) -[2023-08-29 08:52:12,627][inference][INFO] - + Warming up the forward pass -[2023-08-29 08:52:12,659][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 08:52:17,707][inference][INFO] - + Forward pass latency: 3.16e-03 (s) -[2023-08-29 08:52:17,709][inference][INFO] - + Forward pass throughput: 316.00 (samples/s) -[2023-08-29 08:52:17,709][inference][INFO] - + Warming up the generation pass -[2023-08-29 08:52:18,201][inference][INFO] - + Tracking generation latency and throughput -[2023-08-29 08:52:23,487][inference][INFO] - + Generation pass latency: 4.80e-01 (s) -[2023-08-29 08:52:23,488][inference][INFO] - + Generation pass throughput: 208.00 (tokens/s) -[2023-08-29 08:52:23,488][inference][INFO] - Saving inference results -[2023-08-29 08:52:23,500][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_08:09:45_99c3d44906ec448c4559fecdc9a63eda364db4d4/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-29_08:09:45_99c3d44906ec448c4559fecdc9a63eda364db4d4/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_08:09:45_99c3d44906ec448c4559fecdc9a63eda364db4d4/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_08:09:45_99c3d44906ec448c4559fecdc9a63eda364db4d4/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-29_08:09:45_99c3d44906ec448c4559fecdc9a63eda364db4d4/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 2468530cde9eae7d4bce4d92c4f9cd509c06e48b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_08:09:45_99c3d44906ec448c4559fecdc9a63eda364db4d4/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
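The generation metrics in the pytorch_gpt2_inference/0 log earlier in this diff follow the same arithmetic: with batch_size=1 and new_tokens=100 (both set in the gpt2 config hunk), the reported 208 tokens/s matches new_tokens divided by the 4.80e-01 s generation latency. A minimal sketch, assuming tokens/s is derived as batch_size * new_tokens / generation latency:

# Illustrative check against the pytorch_gpt2_inference/0 results in this diff.
batch_size = 1
new_tokens = 100
generate_latency_s = 0.48

derived = batch_size * new_tokens / generate_latency_s  # ~208.3 tokens/s
print(f"derived={derived:.1f} tokens/s, reported=208.0")
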
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_08:09:45_99c3d44906ec448c4559fecdc9a63eda364db4d4/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-29_08:09:45_99c3d44906ec448c4559fecdc9a63eda364db4d4/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-29_08:09:45_99c3d44906ec448c4559fecdc9a63eda364db4d4/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_08:09:45_99c3d44906ec448c4559fecdc9a63eda364db4d4/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-29_08:09:45_99c3d44906ec448c4559fecdc9a63eda364db4d4/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-29_08:09:45_99c3d44906ec448c4559fecdc9a63eda364db4d4/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_08:09:45_99c3d44906ec448c4559fecdc9a63eda364db4d4/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_08:09:45_99c3d44906ec448c4559fecdc9a63eda364db4d4/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-29_08:09:45_99c3d44906ec448c4559fecdc9a63eda364db4d4/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 8daa4cc0b557737cce3822b81885626ffb5f071d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_08:09:45_99c3d44906ec448c4559fecdc9a63eda364db4d4/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.86208,0.00316,316.0 diff --git a/raw_results/2023-08-29_08:09:45_99c3d44906ec448c4559fecdc9a63eda364db4d4/pytorch_bert_inference/0/main.log b/raw_results/2023-08-29_08:09:45_99c3d44906ec448c4559fecdc9a63eda364db4d4/pytorch_bert_inference/0/main.log deleted file mode 100644 index b07e5c2dc9900bcb9c563bdee1d7f5b0fc4538e8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_08:09:45_99c3d44906ec448c4559fecdc9a63eda364db4d4/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-29 08:53:30,295][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 08:53:30,296][benchmark][INFO] - + Setting seed(42) -[2023-08-29 08:53:31,594][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-29 08:53:31,594][backend][INFO] - Configuring pytorch backend -[2023-08-29 08:53:31,595][backend][INFO] - + Checking initial device isolation -[2023-08-29 08:53:31,595][backend][INFO] - + Checking contineous device isolation -[2023-08-29 08:53:31,595][pytorch][INFO] - + Disabling gradients -[2023-08-29 08:53:31,595][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 08:53:32,201][pytorch][INFO] - + Turning on eval mode -[2023-08-29 08:53:32,201][inference][INFO] - Running inference benchmark -[2023-08-29 08:53:32,321][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 08:53:32,322][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-29 08:53:32,387][inference][INFO] - + Forward pass peak memory: 466.86208 (MB) -[2023-08-29 08:53:32,389][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 08:53:32,390][inference][INFO] - + Warming up the forward pass -[2023-08-29 08:53:32,428][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 08:53:37,477][inference][INFO] - + Forward pass latency: 3.16e-03 (s) -[2023-08-29 08:53:37,478][inference][INFO] - + Forward pass throughput: 316.00 (samples/s) -[2023-08-29 08:53:37,478][inference][INFO] - Saving inference results -[2023-08-29 08:53:37,489][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_08:09:45_99c3d44906ec448c4559fecdc9a63eda364db4d4/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-29_08:09:45_99c3d44906ec448c4559fecdc9a63eda364db4d4/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_08:09:45_99c3d44906ec448c4559fecdc9a63eda364db4d4/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_08:09:45_99c3d44906ec448c4559fecdc9a63eda364db4d4/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-29_08:09:45_99c3d44906ec448c4559fecdc9a63eda364db4d4/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 6a17f06bd83ce596a15219672d351e5620e0f40c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_08:09:45_99c3d44906ec448c4559fecdc9a63eda364db4d4/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_08:09:45_99c3d44906ec448c4559fecdc9a63eda364db4d4/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-29_08:09:45_99c3d44906ec448c4559fecdc9a63eda364db4d4/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-08-29_08:09:45_99c3d44906ec448c4559fecdc9a63eda364db4d4/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_08:09:45_99c3d44906ec448c4559fecdc9a63eda364db4d4/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-29_08:09:45_99c3d44906ec448c4559fecdc9a63eda364db4d4/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-29_08:09:45_99c3d44906ec448c4559fecdc9a63eda364db4d4/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_08:09:45_99c3d44906ec448c4559fecdc9a63eda364db4d4/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_08:09:45_99c3d44906ec448c4559fecdc9a63eda364db4d4/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-29_08:09:45_99c3d44906ec448c4559fecdc9a63eda364db4d4/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 7a11b3b7c72acc513a3227af4d7481bcccf66be6..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_08:09:45_99c3d44906ec448c4559fecdc9a63eda364db4d4/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.90246399999995,0.0035,1140.0 diff --git a/raw_results/2023-08-29_08:09:45_99c3d44906ec448c4559fecdc9a63eda364db4d4/pytorch_bert_inference/1/main.log b/raw_results/2023-08-29_08:09:45_99c3d44906ec448c4559fecdc9a63eda364db4d4/pytorch_bert_inference/1/main.log deleted file mode 100644 index 524c33d44705c5392acffdee473b06b27015574f..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-29_08:09:45_99c3d44906ec448c4559fecdc9a63eda364db4d4/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-29 08:53:37,862][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 08:53:37,863][benchmark][INFO] - + Setting seed(42) -[2023-08-29 08:53:38,302][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-29 08:53:38,302][backend][INFO] - Configuring pytorch backend -[2023-08-29 08:53:38,302][backend][INFO] - + Checking initial device isolation -[2023-08-29 08:53:38,303][backend][INFO] - + Checking contineous device isolation -[2023-08-29 08:53:38,303][pytorch][INFO] - + Disabling gradients -[2023-08-29 08:53:38,303][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 08:53:38,451][pytorch][INFO] - + Turning on eval mode -[2023-08-29 08:53:38,452][inference][INFO] - Running inference benchmark -[2023-08-29 08:53:38,579][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 08:53:38,581][inference][INFO] - + Tracking forward pass peak memory -[2023-08-29 08:53:38,628][inference][INFO] - + Forward pass peak memory: 467.90246399999995 (MB) -[2023-08-29 08:53:38,629][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 08:53:38,631][inference][INFO] - + Warming up the forward pass -[2023-08-29 08:53:38,667][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 08:53:43,710][inference][INFO] - + Forward pass latency: 3.50e-03 (s) -[2023-08-29 08:53:43,712][inference][INFO] - + Forward pass throughput: 1140.00 (samples/s) -[2023-08-29 08:53:43,712][inference][INFO] - Saving inference results -[2023-08-29 08:53:43,720][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_08:09:45_99c3d44906ec448c4559fecdc9a63eda364db4d4/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-29_08:09:45_99c3d44906ec448c4559fecdc9a63eda364db4d4/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_08:09:45_99c3d44906ec448c4559fecdc9a63eda364db4d4/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference 
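Every deleted file in this diff sits under the same layout, which mirrors the hydra sweep.dir pattern shown in the configs: raw_results/<COMMIT_DATE_GMT>_<COMMIT_SHA>/<experiment_name>/<hydra.job.num>/<artifact>. Below is a hedged sketch of parsing that convention; the regex and field names are illustrative and not part of the benchmark tooling:

import re

# Illustrative parser for the raw_results path convention used throughout this diff.
PATH_RE = re.compile(
    r"raw_results/"
    r"(?P<commit_date_gmt>[^/_]+_[^/_]+)_(?P<commit_sha>[0-9a-f]{40})/"
    r"(?P<experiment_name>[^/]+)/"
    r"(?P<job_num>\d+)/"
    r"(?P<artifact>.+)"
)

path = ("raw_results/2023-08-29_08:09:45_99c3d44906ec448c4559fecdc9a63eda364db4d4/"
        "pytorch_gpt2_inference/0/inference_results.csv")
print(PATH_RE.match(path).groupdict())
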
-model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_08:09:45_99c3d44906ec448c4559fecdc9a63eda364db4d4/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-29_08:09:45_99c3d44906ec448c4559fecdc9a63eda364db4d4/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index a0faeffb84046947cc46bf29287f6ac1fe0452cb..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_08:09:45_99c3d44906ec448c4559fecdc9a63eda364db4d4/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_08:09:45_99c3d44906ec448c4559fecdc9a63eda364db4d4/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-29_08:09:45_99c3d44906ec448c4559fecdc9a63eda364db4d4/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-29_08:09:45_99c3d44906ec448c4559fecdc9a63eda364db4d4/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_08:09:45_99c3d44906ec448c4559fecdc9a63eda364db4d4/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-29_08:09:45_99c3d44906ec448c4559fecdc9a63eda364db4d4/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-29_08:09:45_99c3d44906ec448c4559fecdc9a63eda364db4d4/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_08:09:45_99c3d44906ec448c4559fecdc9a63eda364db4d4/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_08:09:45_99c3d44906ec448c4559fecdc9a63eda364db4d4/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-29_08:09:45_99c3d44906ec448c4559fecdc9a63eda364db4d4/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 503ed2f813f86939fa291b8e8c7ba59b65ddc5ae..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_08:09:45_99c3d44906ec448c4559fecdc9a63eda364db4d4/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.532672,0.00393,254.0,0.486,206.0 diff --git a/raw_results/2023-08-29_08:09:45_99c3d44906ec448c4559fecdc9a63eda364db4d4/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-29_08:09:45_99c3d44906ec448c4559fecdc9a63eda364db4d4/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 3fcae87a8cedb1d7a013a94527e2e2de2167e4f9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_08:09:45_99c3d44906ec448c4559fecdc9a63eda364db4d4/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-29 08:53:49,301][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 08:53:49,302][benchmark][INFO] - + Setting seed(42) -[2023-08-29 08:53:50,749][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-29 08:53:50,750][backend][INFO] - Configuring pytorch backend -[2023-08-29 08:53:50,750][backend][INFO] - + Checking initial device isolation -[2023-08-29 08:53:50,750][backend][INFO] - + Checking contineous device isolation -[2023-08-29 08:53:50,750][pytorch][INFO] - + Disabling gradients -[2023-08-29 08:53:50,750][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 08:53:51,575][pytorch][INFO] - + Turning on eval mode -[2023-08-29 08:53:51,576][inference][INFO] - Running inference benchmark -[2023-08-29 08:53:51,774][inference][INFO] - + Tracking forward pass peak memory -[2023-08-29 08:53:51,823][inference][INFO] - + Forward pass peak memory: 469.532672 (MB) -[2023-08-29 08:53:51,825][inference][INFO] - + Warming up the forward pass 
-[2023-08-29 08:53:51,858][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 08:53:56,902][inference][INFO] - + Forward pass latency: 3.93e-03 (s) -[2023-08-29 08:53:56,904][inference][INFO] - + Forward pass throughput: 254.00 (samples/s) -[2023-08-29 08:53:56,905][inference][INFO] - + Warming up the generation pass -[2023-08-29 08:53:57,400][inference][INFO] - + Tracking generation latency and throughput -[2023-08-29 08:54:02,750][inference][INFO] - + Generation pass latency: 4.86e-01 (s) -[2023-08-29 08:54:02,751][inference][INFO] - + Generation pass throughput: 206.00 (tokens/s) -[2023-08-29 08:54:02,751][inference][INFO] - Saving inference results -[2023-08-29 08:54:02,762][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_09:03:52_4c21da5e347bfc53ee4ec5b71a23721fefe6822c/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-29_09:03:52_4c21da5e347bfc53ee4ec5b71a23721fefe6822c/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_09:03:52_4c21da5e347bfc53ee4ec5b71a23721fefe6822c/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_09:03:52_4c21da5e347bfc53ee4ec5b71a23721fefe6822c/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-29_09:03:52_4c21da5e347bfc53ee4ec5b71a23721fefe6822c/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index e8580e1f887874adb718fd0e383961f789f607dd..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_09:03:52_4c21da5e347bfc53ee4ec5b71a23721fefe6822c/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_09:03:52_4c21da5e347bfc53ee4ec5b71a23721fefe6822c/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-29_09:03:52_4c21da5e347bfc53ee4ec5b71a23721fefe6822c/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-29_09:03:52_4c21da5e347bfc53ee4ec5b71a23721fefe6822c/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_09:03:52_4c21da5e347bfc53ee4ec5b71a23721fefe6822c/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-29_09:03:52_4c21da5e347bfc53ee4ec5b71a23721fefe6822c/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-29_09:03:52_4c21da5e347bfc53ee4ec5b71a23721fefe6822c/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_09:03:52_4c21da5e347bfc53ee4ec5b71a23721fefe6822c/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_09:03:52_4c21da5e347bfc53ee4ec5b71a23721fefe6822c/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-29_09:03:52_4c21da5e347bfc53ee4ec5b71a23721fefe6822c/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 10243f7c09939db10a9ae4862429289c824b8592..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_09:03:52_4c21da5e347bfc53ee4ec5b71a23721fefe6822c/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.60403199999996,0.00365,274.0 diff --git a/raw_results/2023-08-29_09:03:52_4c21da5e347bfc53ee4ec5b71a23721fefe6822c/pytorch_bert_inference/0/main.log b/raw_results/2023-08-29_09:03:52_4c21da5e347bfc53ee4ec5b71a23721fefe6822c/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
909f51b388ef1b6749d52d48ecbd5dd84db6ef86..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_09:03:52_4c21da5e347bfc53ee4ec5b71a23721fefe6822c/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-29 10:50:12,299][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 10:50:12,301][benchmark][INFO] - + Setting seed(42) -[2023-08-29 10:50:13,588][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-29 10:50:13,588][backend][INFO] - Configuring pytorch backend -[2023-08-29 10:50:13,589][backend][INFO] - + Checking initial device isolation -[2023-08-29 10:50:13,589][backend][INFO] - + Checking continuous device isolation -[2023-08-29 10:50:13,589][pytorch][INFO] - + Disabling gradients -[2023-08-29 10:50:13,589][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 10:50:14,233][pytorch][INFO] - + Turning on eval mode -[2023-08-29 10:50:14,234][inference][INFO] - Running inference benchmark -[2023-08-29 10:50:14,361][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 10:50:14,363][inference][INFO] - + Tracking forward pass peak memory -[2023-08-29 10:50:14,422][inference][INFO] - + Forward pass peak memory: 466.60403199999996 (MB) -[2023-08-29 10:50:14,424][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 10:50:14,425][inference][INFO] - + Warming up the forward pass -[2023-08-29 10:50:14,467][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 10:50:19,514][inference][INFO] - + Forward pass latency: 3.65e-03 (s) -[2023-08-29 10:50:19,515][inference][INFO] - + Forward pass throughput: 274.00 (samples/s) -[2023-08-29 10:50:19,515][inference][INFO] - Saving inference results -[2023-08-29 10:50:19,525][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_09:03:52_4c21da5e347bfc53ee4ec5b71a23721fefe6822c/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-29_09:03:52_4c21da5e347bfc53ee4ec5b71a23721fefe6822c/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_09:03:52_4c21da5e347bfc53ee4ec5b71a23721fefe6822c/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 -
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_09:03:52_4c21da5e347bfc53ee4ec5b71a23721fefe6822c/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-29_09:03:52_4c21da5e347bfc53ee4ec5b71a23721fefe6822c/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 8409b435e567933480ab10f607998a54ffb0cf6f..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_09:03:52_4c21da5e347bfc53ee4ec5b71a23721fefe6822c/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_09:03:52_4c21da5e347bfc53ee4ec5b71a23721fefe6822c/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-29_09:03:52_4c21da5e347bfc53ee4ec5b71a23721fefe6822c/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-29_09:03:52_4c21da5e347bfc53ee4ec5b71a23721fefe6822c/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_09:03:52_4c21da5e347bfc53ee4ec5b71a23721fefe6822c/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-29_09:03:52_4c21da5e347bfc53ee4ec5b71a23721fefe6822c/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-29_09:03:52_4c21da5e347bfc53ee4ec5b71a23721fefe6822c/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_09:03:52_4c21da5e347bfc53ee4ec5b71a23721fefe6822c/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_09:03:52_4c21da5e347bfc53ee4ec5b71a23721fefe6822c/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-29_09:03:52_4c21da5e347bfc53ee4ec5b71a23721fefe6822c/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index a82f1843ad1759465997784981b3c6e9e9019910..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_09:03:52_4c21da5e347bfc53ee4ec5b71a23721fefe6822c/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.533824,0.00432,926.0 diff --git a/raw_results/2023-08-29_09:03:52_4c21da5e347bfc53ee4ec5b71a23721fefe6822c/pytorch_bert_inference/1/main.log b/raw_results/2023-08-29_09:03:52_4c21da5e347bfc53ee4ec5b71a23721fefe6822c/pytorch_bert_inference/1/main.log deleted file mode 100644 index 03a2860637dc6c1f5f8abc22dbd9749b2fa1058d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_09:03:52_4c21da5e347bfc53ee4ec5b71a23721fefe6822c/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-29 10:50:19,901][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 10:50:19,903][benchmark][INFO] - + Setting seed(42) -[2023-08-29 10:50:20,851][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-29 10:50:20,851][backend][INFO] - Configuring pytorch backend -[2023-08-29 10:50:20,851][backend][INFO] - + Checking initial device isolation -[2023-08-29 10:50:20,851][backend][INFO] - + Checking continuous device isolation -[2023-08-29 10:50:20,852][pytorch][INFO] - + Disabling gradients -[2023-08-29 10:50:20,852][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 10:50:20,979][pytorch][INFO] - + Turning on eval mode -[2023-08-29 10:50:20,979][inference][INFO] - Running inference benchmark -[2023-08-29 10:50:21,113][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 10:50:21,114][inference][INFO] - + Tracking forward pass peak
memory -[2023-08-29 10:50:21,155][inference][INFO] - + Forward pass peak memory: 467.533824 (MB) -[2023-08-29 10:50:21,156][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 10:50:21,158][inference][INFO] - + Warming up the forward pass -[2023-08-29 10:50:21,200][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 10:50:26,243][inference][INFO] - + Forward pass latency: 4.32e-03 (s) -[2023-08-29 10:50:26,244][inference][INFO] - + Forward pass throughput: 926.00 (samples/s) -[2023-08-29 10:50:26,245][inference][INFO] - Saving inference results -[2023-08-29 10:50:26,251][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_09:03:52_4c21da5e347bfc53ee4ec5b71a23721fefe6822c/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-29_09:03:52_4c21da5e347bfc53ee4ec5b71a23721fefe6822c/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_09:03:52_4c21da5e347bfc53ee4ec5b71a23721fefe6822c/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_09:03:52_4c21da5e347bfc53ee4ec5b71a23721fefe6822c/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-29_09:03:52_4c21da5e347bfc53ee4ec5b71a23721fefe6822c/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index bb08a49e2d830887688dbb9492ebb07b2e4821b7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_09:03:52_4c21da5e347bfc53ee4ec5b71a23721fefe6822c/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_09:03:52_4c21da5e347bfc53ee4ec5b71a23721fefe6822c/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-29_09:03:52_4c21da5e347bfc53ee4ec5b71a23721fefe6822c/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-29_09:03:52_4c21da5e347bfc53ee4ec5b71a23721fefe6822c/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_09:03:52_4c21da5e347bfc53ee4ec5b71a23721fefe6822c/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-29_09:03:52_4c21da5e347bfc53ee4ec5b71a23721fefe6822c/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-29_09:03:52_4c21da5e347bfc53ee4ec5b71a23721fefe6822c/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_09:03:52_4c21da5e347bfc53ee4ec5b71a23721fefe6822c/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_09:03:52_4c21da5e347bfc53ee4ec5b71a23721fefe6822c/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-29_09:03:52_4c21da5e347bfc53ee4ec5b71a23721fefe6822c/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index efe8a81998c0c6a53887828350d310aa269e8dbc..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_09:03:52_4c21da5e347bfc53ee4ec5b71a23721fefe6822c/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.286912,0.00334,299.0,0.481,208.0 diff --git a/raw_results/2023-08-29_09:03:52_4c21da5e347bfc53ee4ec5b71a23721fefe6822c/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-29_09:03:52_4c21da5e347bfc53ee4ec5b71a23721fefe6822c/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 43eb3513d362641f9dec1000cb07dac5d7575bc6..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-29_09:03:52_4c21da5e347bfc53ee4ec5b71a23721fefe6822c/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-29 10:50:31,382][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 10:50:31,382][benchmark][INFO] - + Setting seed(42) -[2023-08-29 10:50:32,937][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-29 10:50:32,937][backend][INFO] - Configuring pytorch backend -[2023-08-29 10:50:32,937][backend][INFO] - + Checking initial device isolation -[2023-08-29 10:50:32,937][backend][INFO] - + Checking continuous device isolation -[2023-08-29 10:50:32,938][pytorch][INFO] - + Disabling gradients -[2023-08-29 10:50:32,938][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 10:50:33,729][pytorch][INFO] - + Turning on eval mode -[2023-08-29 10:50:33,730][inference][INFO] - Running inference benchmark -[2023-08-29 10:50:33,984][inference][INFO] - + Tracking forward pass peak memory -[2023-08-29 10:50:34,027][inference][INFO] - + Forward pass peak memory: 469.286912 (MB) -[2023-08-29 10:50:34,029][inference][INFO] - + Warming up the forward pass -[2023-08-29 10:50:34,059][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 10:50:39,107][inference][INFO] - + Forward pass latency: 3.34e-03 (s) -[2023-08-29 10:50:39,109][inference][INFO] - + Forward pass throughput: 299.00 (samples/s) -[2023-08-29 10:50:39,110][inference][INFO] - + Warming up the generation pass -[2023-08-29 10:50:39,596][inference][INFO] - + Tracking generation latency and throughput -[2023-08-29 10:50:44,885][inference][INFO] - + Generation pass latency: 4.81e-01 (s) -[2023-08-29 10:50:44,886][inference][INFO] - + Generation pass throughput: 208.00 (tokens/s) -[2023-08-29 10:50:44,886][inference][INFO] - Saving inference results -[2023-08-29 10:50:44,897][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_10:05:27_77713d11f6656314fb06c217cf43c4b8f5c64df8/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-29_10:05:27_77713d11f6656314fb06c217cf43c4b8f5c64df8/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_10:05:27_77713d11f6656314fb06c217cf43c4b8f5c64df8/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 -
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_10:05:27_77713d11f6656314fb06c217cf43c4b8f5c64df8/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-29_10:05:27_77713d11f6656314fb06c217cf43c4b8f5c64df8/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index a718772ab127fa873e607881ad439a4d520c0d4e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_10:05:27_77713d11f6656314fb06c217cf43c4b8f5c64df8/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_10:05:27_77713d11f6656314fb06c217cf43c4b8f5c64df8/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-29_10:05:27_77713d11f6656314fb06c217cf43c4b8f5c64df8/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-29_10:05:27_77713d11f6656314fb06c217cf43c4b8f5c64df8/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_10:05:27_77713d11f6656314fb06c217cf43c4b8f5c64df8/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-29_10:05:27_77713d11f6656314fb06c217cf43c4b8f5c64df8/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-29_10:05:27_77713d11f6656314fb06c217cf43c4b8f5c64df8/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_10:05:27_77713d11f6656314fb06c217cf43c4b8f5c64df8/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_10:05:27_77713d11f6656314fb06c217cf43c4b8f5c64df8/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-29_10:05:27_77713d11f6656314fb06c217cf43c4b8f5c64df8/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 4e880f1f47dbe5420cbe8ae54e05c9b9858ef213..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_10:05:27_77713d11f6656314fb06c217cf43c4b8f5c64df8/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.02592,0.00358,279.0 diff --git a/raw_results/2023-08-29_10:05:27_77713d11f6656314fb06c217cf43c4b8f5c64df8/pytorch_bert_inference/0/main.log b/raw_results/2023-08-29_10:05:27_77713d11f6656314fb06c217cf43c4b8f5c64df8/pytorch_bert_inference/0/main.log deleted file mode 100644 index df5a0e609093b73a3a69ed1267043868b0c3770a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_10:05:27_77713d11f6656314fb06c217cf43c4b8f5c64df8/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-29 10:51:56,223][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 10:51:56,224][benchmark][INFO] - + Setting seed(42) -[2023-08-29 10:51:58,007][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-29 10:51:58,007][backend][INFO] - Configuring pytorch backend -[2023-08-29 10:51:58,007][backend][INFO] - + Checking initial device isolation -[2023-08-29 10:51:58,008][backend][INFO] - + Checking continuous device isolation -[2023-08-29 10:51:58,008][pytorch][INFO] - + Disabling gradients -[2023-08-29 10:51:58,008][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 10:51:58,809][pytorch][INFO] - + Turning on eval mode -[2023-08-29 10:51:58,810][inference][INFO] - Running inference benchmark -[2023-08-29 10:51:58,944][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 10:51:58,946][inference][INFO] - + Tracking forward pass peak
memory -[2023-08-29 10:51:59,003][inference][INFO] - + Forward pass peak memory: 467.02592 (MB) -[2023-08-29 10:51:59,004][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 10:51:59,006][inference][INFO] - + Warming up the forward pass -[2023-08-29 10:51:59,048][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 10:52:04,094][inference][INFO] - + Forward pass latency: 3.58e-03 (s) -[2023-08-29 10:52:04,096][inference][INFO] - + Forward pass throughput: 279.00 (samples/s) -[2023-08-29 10:52:04,096][inference][INFO] - Saving inference results -[2023-08-29 10:52:04,106][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_10:05:27_77713d11f6656314fb06c217cf43c4b8f5c64df8/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-29_10:05:27_77713d11f6656314fb06c217cf43c4b8f5c64df8/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_10:05:27_77713d11f6656314fb06c217cf43c4b8f5c64df8/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_10:05:27_77713d11f6656314fb06c217cf43c4b8f5c64df8/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-29_10:05:27_77713d11f6656314fb06c217cf43c4b8f5c64df8/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index f9c1f93f46b5f5422d7486c4b574a95d8a2802e6..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_10:05:27_77713d11f6656314fb06c217cf43c4b8f5c64df8/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_10:05:27_77713d11f6656314fb06c217cf43c4b8f5c64df8/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-29_10:05:27_77713d11f6656314fb06c217cf43c4b8f5c64df8/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-08-29_10:05:27_77713d11f6656314fb06c217cf43c4b8f5c64df8/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_10:05:27_77713d11f6656314fb06c217cf43c4b8f5c64df8/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-29_10:05:27_77713d11f6656314fb06c217cf43c4b8f5c64df8/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-29_10:05:27_77713d11f6656314fb06c217cf43c4b8f5c64df8/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_10:05:27_77713d11f6656314fb06c217cf43c4b8f5c64df8/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_10:05:27_77713d11f6656314fb06c217cf43c4b8f5c64df8/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-29_10:05:27_77713d11f6656314fb06c217cf43c4b8f5c64df8/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 8c89ea142d0761aafad13d6aaf220c2cc8332770..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_10:05:27_77713d11f6656314fb06c217cf43c4b8f5c64df8/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.996672,0.00358,1120.0 diff --git a/raw_results/2023-08-29_10:05:27_77713d11f6656314fb06c217cf43c4b8f5c64df8/pytorch_bert_inference/1/main.log b/raw_results/2023-08-29_10:05:27_77713d11f6656314fb06c217cf43c4b8f5c64df8/pytorch_bert_inference/1/main.log deleted file mode 100644 index 8a7a2c3cf027bd7cd9225230179f0d65c35ac346..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-29_10:05:27_77713d11f6656314fb06c217cf43c4b8f5c64df8/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-29 10:52:04,476][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 10:52:04,477][benchmark][INFO] - + Setting seed(42) -[2023-08-29 10:52:04,966][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-29 10:52:04,967][backend][INFO] - Configuring pytorch backend -[2023-08-29 10:52:04,967][backend][INFO] - + Checking initial device isolation -[2023-08-29 10:52:04,967][backend][INFO] - + Checking continuous device isolation -[2023-08-29 10:52:04,967][pytorch][INFO] - + Disabling gradients -[2023-08-29 10:52:04,967][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 10:52:05,092][pytorch][INFO] - + Turning on eval mode -[2023-08-29 10:52:05,092][inference][INFO] - Running inference benchmark -[2023-08-29 10:52:05,241][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 10:52:05,242][inference][INFO] - + Tracking forward pass peak memory -[2023-08-29 10:52:05,283][inference][INFO] - + Forward pass peak memory: 467.996672 (MB) -[2023-08-29 10:52:05,284][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 10:52:05,286][inference][INFO] - + Warming up the forward pass -[2023-08-29 10:52:05,337][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 10:52:10,379][inference][INFO] - + Forward pass latency: 3.58e-03 (s) -[2023-08-29 10:52:10,381][inference][INFO] - + Forward pass throughput: 1120.00 (samples/s) -[2023-08-29 10:52:10,381][inference][INFO] - Saving inference results -[2023-08-29 10:52:10,388][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_10:05:27_77713d11f6656314fb06c217cf43c4b8f5c64df8/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-29_10:05:27_77713d11f6656314fb06c217cf43c4b8f5c64df8/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_10:05:27_77713d11f6656314fb06c217cf43c4b8f5c64df8/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model:
hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_10:05:27_77713d11f6656314fb06c217cf43c4b8f5c64df8/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-29_10:05:27_77713d11f6656314fb06c217cf43c4b8f5c64df8/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index c4ac7414fb87dd5e6e26e392b28eb1150d8743f1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_10:05:27_77713d11f6656314fb06c217cf43c4b8f5c64df8/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
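A note on the interpolations in these files: the raw .config/config.yaml copies keep unresolved OmegaConf expressions such as disable_grad: ${is_inference:${benchmark.name}}, while the hydra_config.yaml saved next to them stores the resolved values (disable_grad: true), and the run/sweep directories are built from ${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}, which is how paths like raw_results/2023-08-29_10:05:27_77713d11.../pytorch_gpt2_inference/0 arise. Below is a minimal sketch of that resolution; the is_inference resolver body is an assumption on my part (the real resolver lives in optimum_benchmark and only its resolved values appear in these artifacts), and the environment values are illustrative.

    import os
    from omegaconf import OmegaConf

    # Assumption: the is_inference resolver reduces to a benchmark-name check;
    # these artifacts only show its resolved output (true for "inference").
    OmegaConf.register_new_resolver("is_inference", lambda name: name == "inference")

    # Illustrative values; in the benchmark runs these come from the CI environment.
    os.environ["COMMIT_DATE_GMT"] = "2023-08-29_10:05:27"
    os.environ["COMMIT_SHA"] = "77713d11f6656314fb06c217cf43c4b8f5c64df8"

    cfg = OmegaConf.create({
        "benchmark": {"name": "inference"},
        "experiment_name": "pytorch_gpt2_inference",
        "backend": {"disable_grad": "${is_inference:${benchmark.name}}"},
        "sweep_dir": "sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}",
    })

    OmegaConf.resolve(cfg)  # materializes interpolations, as in hydra_config.yaml
    assert cfg.backend.disable_grad is True
    print(cfg.sweep_dir)  # sweeps/2023-08-29_10:05:27_77713d11.../pytorch_gpt2_inference

This is why each config.yaml / hydra_config.yaml pair in these runs differs only in the disable_grad and eval_mode keys.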
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_10:05:27_77713d11f6656314fb06c217cf43c4b8f5c64df8/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-29_10:05:27_77713d11f6656314fb06c217cf43c4b8f5c64df8/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-29_10:05:27_77713d11f6656314fb06c217cf43c4b8f5c64df8/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_10:05:27_77713d11f6656314fb06c217cf43c4b8f5c64df8/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-29_10:05:27_77713d11f6656314fb06c217cf43c4b8f5c64df8/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-29_10:05:27_77713d11f6656314fb06c217cf43c4b8f5c64df8/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_10:05:27_77713d11f6656314fb06c217cf43c4b8f5c64df8/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_10:05:27_77713d11f6656314fb06c217cf43c4b8f5c64df8/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-29_10:05:27_77713d11f6656314fb06c217cf43c4b8f5c64df8/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 4fd81474d6008e91e240b3675cd794accfed7a0d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_10:05:27_77713d11f6656314fb06c217cf43c4b8f5c64df8/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.204992,0.00393,254.0,0.518,193.0 diff --git a/raw_results/2023-08-29_10:05:27_77713d11f6656314fb06c217cf43c4b8f5c64df8/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-29_10:05:27_77713d11f6656314fb06c217cf43c4b8f5c64df8/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 64eeeb610b76f319505608d0f41ec402268ed927..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_10:05:27_77713d11f6656314fb06c217cf43c4b8f5c64df8/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-29 10:52:15,146][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 10:52:15,148][benchmark][INFO] - + Setting seed(42) -[2023-08-29 10:52:16,742][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-29 10:52:16,743][backend][INFO] - Configuring pytorch backend -[2023-08-29 10:52:16,743][backend][INFO] - + Checking initial device isolation -[2023-08-29 10:52:16,743][backend][INFO] - + Checking contineous device isolation -[2023-08-29 10:52:16,743][pytorch][INFO] - + Disabling gradients -[2023-08-29 10:52:16,743][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 10:52:17,479][pytorch][INFO] - + Turning on eval mode -[2023-08-29 10:52:17,480][inference][INFO] - Running inference benchmark -[2023-08-29 10:52:17,682][inference][INFO] - + Tracking forward pass peak memory -[2023-08-29 10:52:17,727][inference][INFO] - + Forward pass peak memory: 469.204992 (MB) -[2023-08-29 10:52:17,729][inference][INFO] - + Warming up the forward pass 
-[2023-08-29 10:52:17,766][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 10:52:22,813][inference][INFO] - + Forward pass latency: 3.93e-03 (s) -[2023-08-29 10:52:22,814][inference][INFO] - + Forward pass throughput: 254.00 (samples/s) -[2023-08-29 10:52:22,815][inference][INFO] - + Warming up the generation pass -[2023-08-29 10:52:23,407][inference][INFO] - + Tracking generation latency and throughput -[2023-08-29 10:52:28,593][inference][INFO] - + Generation pass latency: 5.18e-01 (s) -[2023-08-29 10:52:28,594][inference][INFO] - + Generation pass throughput: 193.00 (tokens/s) -[2023-08-29 10:52:28,594][inference][INFO] - Saving inference results -[2023-08-29 10:52:28,605][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_11:32:19_c9bae84eb58745784e5cc6491f3f4958ba4706c3/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-29_11:32:19_c9bae84eb58745784e5cc6491f3f4958ba4706c3/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_11:32:19_c9bae84eb58745784e5cc6491f3f4958ba4706c3/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_11:32:19_c9bae84eb58745784e5cc6491f3f4958ba4706c3/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-29_11:32:19_c9bae84eb58745784e5cc6491f3f4958ba4706c3/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 3e996d004e0a9711906560f789b7fc5152151264..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_11:32:19_c9bae84eb58745784e5cc6491f3f4958ba4706c3/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_11:32:19_c9bae84eb58745784e5cc6491f3f4958ba4706c3/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-29_11:32:19_c9bae84eb58745784e5cc6491f3f4958ba4706c3/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-29_11:32:19_c9bae84eb58745784e5cc6491f3f4958ba4706c3/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_11:32:19_c9bae84eb58745784e5cc6491f3f4958ba4706c3/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-29_11:32:19_c9bae84eb58745784e5cc6491f3f4958ba4706c3/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-29_11:32:19_c9bae84eb58745784e5cc6491f3f4958ba4706c3/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_11:32:19_c9bae84eb58745784e5cc6491f3f4958ba4706c3/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_11:32:19_c9bae84eb58745784e5cc6491f3f4958ba4706c3/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-29_11:32:19_c9bae84eb58745784e5cc6491f3f4958ba4706c3/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 0f69bbe03f714dacd3b615e2a90fc82594899a43..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_11:32:19_c9bae84eb58745784e5cc6491f3f4958ba4706c3/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.808832,0.0038,263.0 diff --git a/raw_results/2023-08-29_11:32:19_c9bae84eb58745784e5cc6491f3f4958ba4706c3/pytorch_bert_inference/0/main.log b/raw_results/2023-08-29_11:32:19_c9bae84eb58745784e5cc6491f3f4958ba4706c3/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
72d3825011e15d0f2526089666e0f4dfff3bc1bc..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_11:32:19_c9bae84eb58745784e5cc6491f3f4958ba4706c3/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-29 12:59:15,830][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 12:59:15,830][benchmark][INFO] - + Setting seed(42) -[2023-08-29 12:59:17,071][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-29 12:59:17,071][backend][INFO] - Configuring pytorch backend -[2023-08-29 12:59:17,071][backend][INFO] - + Checking initial device isolation -[2023-08-29 12:59:17,071][backend][INFO] - + Checking contineous device isolation -[2023-08-29 12:59:17,071][pytorch][INFO] - + Disabling gradients -[2023-08-29 12:59:17,072][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 12:59:17,698][pytorch][INFO] - + Turning on eval mode -[2023-08-29 12:59:17,699][inference][INFO] - Running inference benchmark -[2023-08-29 12:59:17,828][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 12:59:17,830][inference][INFO] - + Tracking forward pass peak memory -[2023-08-29 12:59:17,894][inference][INFO] - + Forward pass peak memory: 466.808832 (MB) -[2023-08-29 12:59:17,895][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 12:59:17,899][inference][INFO] - + Warming up the forward pass -[2023-08-29 12:59:17,941][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 12:59:22,985][inference][INFO] - + Forward pass latency: 3.80e-03 (s) -[2023-08-29 12:59:22,986][inference][INFO] - + Forward pass throughput: 263.00 (samples/s) -[2023-08-29 12:59:22,986][inference][INFO] - Saving inference results -[2023-08-29 12:59:22,996][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_11:32:19_c9bae84eb58745784e5cc6491f3f4958ba4706c3/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-29_11:32:19_c9bae84eb58745784e5cc6491f3f4958ba4706c3/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_11:32:19_c9bae84eb58745784e5cc6491f3f4958ba4706c3/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_11:32:19_c9bae84eb58745784e5cc6491f3f4958ba4706c3/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-29_11:32:19_c9bae84eb58745784e5cc6491f3f4958ba4706c3/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 5e9599f5d27fc8ddef9364875aae8746f8614200..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_11:32:19_c9bae84eb58745784e5cc6491f3f4958ba4706c3/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
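The sweeper block in these hydra.yaml files (params: benchmark.input_shapes.batch_size: 1,4) is what produces the numbered 0/ and 1/ subdirectories per experiment: Hydra's BasicSweeper fans the comma-separated list out into one job per value, and each job records its single override in .config/overrides.yaml along with its job number. The following is a rough reconstruction of that fan-out for the single-key case used here, not the actual BasicSweeper code (which also handles multiple keys and batching):

    # Rough sketch of the basic-sweeper fan-out for one swept key, matching
    # the overrides.yaml contents and hydra.job.num values in these runs.
    sweep_params = {"benchmark.input_shapes.batch_size": "1,4"}

    jobs = []
    for key, csv_values in sweep_params.items():
        for num, value in enumerate(csv_values.split(",")):
            jobs.append({"num": num, "overrides": [f"{key}={value}"]})

    for job in jobs:
        print(job)
    # {'num': 0, 'overrides': ['benchmark.input_shapes.batch_size=1']}
    # {'num': 1, 'overrides': ['benchmark.input_shapes.batch_size=4']}

The gpt2 experiments, by contrast, have params: null and an empty overrides list ([]), so they only ever produce job 0.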
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_11:32:19_c9bae84eb58745784e5cc6491f3f4958ba4706c3/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-29_11:32:19_c9bae84eb58745784e5cc6491f3f4958ba4706c3/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-29_11:32:19_c9bae84eb58745784e5cc6491f3f4958ba4706c3/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_11:32:19_c9bae84eb58745784e5cc6491f3f4958ba4706c3/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-29_11:32:19_c9bae84eb58745784e5cc6491f3f4958ba4706c3/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-29_11:32:19_c9bae84eb58745784e5cc6491f3f4958ba4706c3/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_11:32:19_c9bae84eb58745784e5cc6491f3f4958ba4706c3/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_11:32:19_c9bae84eb58745784e5cc6491f3f4958ba4706c3/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-29_11:32:19_c9bae84eb58745784e5cc6491f3f4958ba4706c3/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 75b303c14ca2b8d37de04ba4f6856a31e0dd3128..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_11:32:19_c9bae84eb58745784e5cc6491f3f4958ba4706c3/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.68127999999996,0.00428,935.0 diff --git a/raw_results/2023-08-29_11:32:19_c9bae84eb58745784e5cc6491f3f4958ba4706c3/pytorch_bert_inference/1/main.log b/raw_results/2023-08-29_11:32:19_c9bae84eb58745784e5cc6491f3f4958ba4706c3/pytorch_bert_inference/1/main.log deleted file mode 100644 index e8449d7fd7444f020d39a7de3b20024c86ca9272..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_11:32:19_c9bae84eb58745784e5cc6491f3f4958ba4706c3/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-29 12:59:23,401][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 12:59:23,402][benchmark][INFO] - + Setting seed(42) -[2023-08-29 12:59:23,851][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-29 12:59:23,851][backend][INFO] - Configuring pytorch backend -[2023-08-29 12:59:23,851][backend][INFO] - + Checking initial device isolation -[2023-08-29 12:59:23,852][backend][INFO] - + Checking contineous device isolation -[2023-08-29 12:59:23,852][pytorch][INFO] - + Disabling gradients -[2023-08-29 12:59:23,852][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 12:59:23,971][pytorch][INFO] - + Turning on eval mode -[2023-08-29 12:59:23,972][inference][INFO] - Running inference benchmark -[2023-08-29 12:59:24,096][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 12:59:24,098][inference][INFO] - + Tracking forward 
pass peak memory -[2023-08-29 12:59:24,137][inference][INFO] - + Forward pass peak memory: 467.68127999999996 (MB) -[2023-08-29 12:59:24,138][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 12:59:24,139][inference][INFO] - + Warming up the forward pass -[2023-08-29 12:59:24,181][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 12:59:29,224][inference][INFO] - + Forward pass latency: 4.28e-03 (s) -[2023-08-29 12:59:29,225][inference][INFO] - + Forward pass throughput: 935.00 (samples/s) -[2023-08-29 12:59:29,225][inference][INFO] - Saving inference results -[2023-08-29 12:59:29,232][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_11:32:19_c9bae84eb58745784e5cc6491f3f4958ba4706c3/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-29_11:32:19_c9bae84eb58745784e5cc6491f3f4958ba4706c3/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_11:32:19_c9bae84eb58745784e5cc6491f3f4958ba4706c3/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_11:32:19_c9bae84eb58745784e5cc6491f3f4958ba4706c3/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-29_11:32:19_c9bae84eb58745784e5cc6491f3f4958ba4706c3/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index e527a264558d731b13c5070aa0111279ad44a639..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_11:32:19_c9bae84eb58745784e5cc6491f3f4958ba4706c3/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - 
launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_11:32:19_c9bae84eb58745784e5cc6491f3f4958ba4706c3/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-29_11:32:19_c9bae84eb58745784e5cc6491f3f4958ba4706c3/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-29_11:32:19_c9bae84eb58745784e5cc6491f3f4958ba4706c3/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_11:32:19_c9bae84eb58745784e5cc6491f3f4958ba4706c3/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-29_11:32:19_c9bae84eb58745784e5cc6491f3f4958ba4706c3/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-29_11:32:19_c9bae84eb58745784e5cc6491f3f4958ba4706c3/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_11:32:19_c9bae84eb58745784e5cc6491f3f4958ba4706c3/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_11:32:19_c9bae84eb58745784e5cc6491f3f4958ba4706c3/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-29_11:32:19_c9bae84eb58745784e5cc6491f3f4958ba4706c3/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 2a5043277ab1e70cd3daf264261008512d2887cf..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_11:32:19_c9bae84eb58745784e5cc6491f3f4958ba4706c3/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.467136,0.0038,263.0,0.519,193.0 diff --git a/raw_results/2023-08-29_11:32:19_c9bae84eb58745784e5cc6491f3f4958ba4706c3/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-29_11:32:19_c9bae84eb58745784e5cc6491f3f4958ba4706c3/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index a40f6e7e6624b9cfb5f44f0cf4ab64ff7eddd7f2..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-29_11:32:19_c9bae84eb58745784e5cc6491f3f4958ba4706c3/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-29 12:59:34,121][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 12:59:34,122][benchmark][INFO] - + Setting seed(42) -[2023-08-29 12:59:35,756][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-29 12:59:35,756][backend][INFO] - Configuring pytorch backend -[2023-08-29 12:59:35,756][backend][INFO] - + Checking initial device isolation -[2023-08-29 12:59:35,757][backend][INFO] - + Checking contineous device isolation -[2023-08-29 12:59:35,757][pytorch][INFO] - + Disabling gradients -[2023-08-29 12:59:35,757][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 12:59:36,416][pytorch][INFO] - + Turning on eval mode -[2023-08-29 12:59:36,417][inference][INFO] - Running inference benchmark -[2023-08-29 12:59:36,862][inference][INFO] - + Tracking forward pass peak memory -[2023-08-29 12:59:36,908][inference][INFO] - + Forward pass peak memory: 469.467136 (MB) -[2023-08-29 12:59:36,910][inference][INFO] - + Warming up the forward pass -[2023-08-29 12:59:36,945][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 12:59:41,992][inference][INFO] - + Forward pass latency: 3.80e-03 (s) -[2023-08-29 12:59:41,993][inference][INFO] - + Forward pass throughput: 263.00 (samples/s) -[2023-08-29 12:59:41,994][inference][INFO] - + Warming up the generation pass -[2023-08-29 12:59:42,589][inference][INFO] - + Tracking generation latency and throughput -[2023-08-29 12:59:47,778][inference][INFO] - + Generation pass latency: 5.19e-01 (s) -[2023-08-29 12:59:47,779][inference][INFO] - + Generation pass throughput: 193.00 (tokens/s) -[2023-08-29 12:59:47,779][inference][INFO] - Saving inference results -[2023-08-29 12:59:47,789][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_11:52:25_dc0c102954ff1f6bcb47de85afea5edc81fc8c7f/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-29_11:52:25_dc0c102954ff1f6bcb47de85afea5edc81fc8c7f/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_11:52:25_dc0c102954ff1f6bcb47de85afea5edc81fc8c7f/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_11:52:25_dc0c102954ff1f6bcb47de85afea5edc81fc8c7f/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-29_11:52:25_dc0c102954ff1f6bcb47de85afea5edc81fc8c7f/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index fba4e6e71c080a322d4f2d5c8f541886e0dfc3cb..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_11:52:25_dc0c102954ff1f6bcb47de85afea5edc81fc8c7f/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
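Across all of these inference_results.csv files the throughput columns follow from the measured latencies and the configured shapes: forward.throughput(samples/s) is batch_size divided by forward.latency(s), and generate.throughput(tokens/s) is new_tokens (fixed at 100 in every config here) divided by generate.latency(s), with results apparently kept to three significant figures. For example, 4 / 0.00358 s is about 1117, reported as 1120 samples/s. A quick check against the numbers in these diffs; the rounding rule is inferred from the data, not stated anywhere in the artifacts:

    # Check: CSV throughputs equal batch_size/latency (forward) and
    # new_tokens/latency (generate), rounded to 3 significant figures.
    def sig3(x: float) -> float:
        return float(f"{x:.3g}")

    assert sig3(4 / 0.00358) == 1120.0   # bert, batch 4, commit 77713d1
    assert sig3(1 / 0.0038) == 263.0     # bert, batch 1, commit c9bae84
    assert sig3(4 / 0.00428) == 935.0    # bert, batch 4, commit c9bae84
    assert sig3(100 / 0.518) == 193.0    # gpt2 generate, commit 77713d1

The roughly five-second gap in the logs between "Tracking forward pass latency and throughput" and the latency report likewise matches benchmark_duration: 5 in the configs.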
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_11:52:25_dc0c102954ff1f6bcb47de85afea5edc81fc8c7f/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-29_11:52:25_dc0c102954ff1f6bcb47de85afea5edc81fc8c7f/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-29_11:52:25_dc0c102954ff1f6bcb47de85afea5edc81fc8c7f/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_11:52:25_dc0c102954ff1f6bcb47de85afea5edc81fc8c7f/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-29_11:52:25_dc0c102954ff1f6bcb47de85afea5edc81fc8c7f/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-29_11:52:25_dc0c102954ff1f6bcb47de85afea5edc81fc8c7f/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_11:52:25_dc0c102954ff1f6bcb47de85afea5edc81fc8c7f/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_11:52:25_dc0c102954ff1f6bcb47de85afea5edc81fc8c7f/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-29_11:52:25_dc0c102954ff1f6bcb47de85afea5edc81fc8c7f/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index c7c7fa56f7e849d1a07a13ae94bb5979e8bb7e3e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_11:52:25_dc0c102954ff1f6bcb47de85afea5edc81fc8c7f/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.079168,0.00366,273.0 diff --git a/raw_results/2023-08-29_11:52:25_dc0c102954ff1f6bcb47de85afea5edc81fc8c7f/pytorch_bert_inference/0/main.log b/raw_results/2023-08-29_11:52:25_dc0c102954ff1f6bcb47de85afea5edc81fc8c7f/pytorch_bert_inference/0/main.log deleted file mode 100644 index e7446ba0433b7418a95b9f42b55dbedd11e68333..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_11:52:25_dc0c102954ff1f6bcb47de85afea5edc81fc8c7f/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-29 13:00:58,048][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 13:00:58,049][benchmark][INFO] - + Setting seed(42) -[2023-08-29 13:00:59,274][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-29 13:00:59,274][backend][INFO] - Configuring pytorch backend -[2023-08-29 13:00:59,275][backend][INFO] - + Checking initial device isolation -[2023-08-29 13:00:59,275][backend][INFO] - + Checking contineous device isolation -[2023-08-29 13:00:59,275][pytorch][INFO] - + Disabling gradients -[2023-08-29 13:00:59,275][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 13:00:59,887][pytorch][INFO] - + Turning on eval mode -[2023-08-29 13:00:59,888][inference][INFO] - Running inference benchmark -[2023-08-29 13:01:00,015][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 13:01:00,017][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-29 13:01:00,080][inference][INFO] - + Forward pass peak memory: 467.079168 (MB) -[2023-08-29 13:01:00,082][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 13:01:00,084][inference][INFO] - + Warming up the forward pass -[2023-08-29 13:01:00,125][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 13:01:05,173][inference][INFO] - + Forward pass latency: 3.66e-03 (s) -[2023-08-29 13:01:05,175][inference][INFO] - + Forward pass throughput: 273.00 (samples/s) -[2023-08-29 13:01:05,175][inference][INFO] - Saving inference results -[2023-08-29 13:01:05,185][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_11:52:25_dc0c102954ff1f6bcb47de85afea5edc81fc8c7f/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-29_11:52:25_dc0c102954ff1f6bcb47de85afea5edc81fc8c7f/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_11:52:25_dc0c102954ff1f6bcb47de85afea5edc81fc8c7f/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_11:52:25_dc0c102954ff1f6bcb47de85afea5edc81fc8c7f/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-29_11:52:25_dc0c102954ff1f6bcb47de85afea5edc81fc8c7f/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 96ca1875c9354c2961934cfcbcd511b067dddc24..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_11:52:25_dc0c102954ff1f6bcb47de85afea5edc81fc8c7f/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_11:52:25_dc0c102954ff1f6bcb47de85afea5edc81fc8c7f/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-29_11:52:25_dc0c102954ff1f6bcb47de85afea5edc81fc8c7f/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-08-29_11:52:25_dc0c102954ff1f6bcb47de85afea5edc81fc8c7f/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_11:52:25_dc0c102954ff1f6bcb47de85afea5edc81fc8c7f/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-29_11:52:25_dc0c102954ff1f6bcb47de85afea5edc81fc8c7f/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-29_11:52:25_dc0c102954ff1f6bcb47de85afea5edc81fc8c7f/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_11:52:25_dc0c102954ff1f6bcb47de85afea5edc81fc8c7f/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_11:52:25_dc0c102954ff1f6bcb47de85afea5edc81fc8c7f/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-29_11:52:25_dc0c102954ff1f6bcb47de85afea5edc81fc8c7f/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 5148a257669aae304591bd7f490bbb14d0586d77..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_11:52:25_dc0c102954ff1f6bcb47de85afea5edc81fc8c7f/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.058112,0.00419,955.0 diff --git a/raw_results/2023-08-29_11:52:25_dc0c102954ff1f6bcb47de85afea5edc81fc8c7f/pytorch_bert_inference/1/main.log b/raw_results/2023-08-29_11:52:25_dc0c102954ff1f6bcb47de85afea5edc81fc8c7f/pytorch_bert_inference/1/main.log deleted file mode 100644 index db9feb39187e236deb30f3ef2ab502a5c6f1b0ca..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-29_11:52:25_dc0c102954ff1f6bcb47de85afea5edc81fc8c7f/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-29 13:01:05,702][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 13:01:05,702][benchmark][INFO] - + Setting seed(42) -[2023-08-29 13:01:06,181][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-29 13:01:06,181][backend][INFO] - Configuring pytorch backend -[2023-08-29 13:01:06,181][backend][INFO] - + Checking initial device isolation -[2023-08-29 13:01:06,182][backend][INFO] - + Checking contineous device isolation -[2023-08-29 13:01:06,182][pytorch][INFO] - + Disabling gradients -[2023-08-29 13:01:06,182][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 13:01:06,296][pytorch][INFO] - + Turning on eval mode -[2023-08-29 13:01:06,297][inference][INFO] - Running inference benchmark -[2023-08-29 13:01:06,418][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 13:01:06,419][inference][INFO] - + Tracking forward pass peak memory -[2023-08-29 13:01:06,461][inference][INFO] - + Forward pass peak memory: 468.058112 (MB) -[2023-08-29 13:01:06,462][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 13:01:06,463][inference][INFO] - + Warming up the forward pass -[2023-08-29 13:01:06,505][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 13:01:11,548][inference][INFO] - + Forward pass latency: 4.19e-03 (s) -[2023-08-29 13:01:11,550][inference][INFO] - + Forward pass throughput: 955.00 (samples/s) -[2023-08-29 13:01:11,550][inference][INFO] - Saving inference results -[2023-08-29 13:01:11,556][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_11:52:25_dc0c102954ff1f6bcb47de85afea5edc81fc8c7f/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-29_11:52:25_dc0c102954ff1f6bcb47de85afea5edc81fc8c7f/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_11:52:25_dc0c102954ff1f6bcb47de85afea5edc81fc8c7f/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: 
hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_11:52:25_dc0c102954ff1f6bcb47de85afea5edc81fc8c7f/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-29_11:52:25_dc0c102954ff1f6bcb47de85afea5edc81fc8c7f/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 54c129f1ae1fe2ddb9270d1fe9ae70be9e8ca49f..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_11:52:25_dc0c102954ff1f6bcb47de85afea5edc81fc8c7f/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
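
A note on the interpolations in the config.yaml files of this diff: disable_grad and eval_mode are stored as the OmegaConf expression ${is_inference:${benchmark.name}}, while the composed hydra_config.yaml files store the resolved booleans (true). A minimal sketch of that resolution, assuming a custom is_inference resolver equivalent to the one the benchmark harness registers (the registration itself is not part of this diff):

    from omegaconf import OmegaConf

    # Assumed stand-in for the resolver optimum_benchmark registers at import time.
    OmegaConf.register_new_resolver("is_inference", lambda name: name == "inference")

    cfg = OmegaConf.create({
        "benchmark": {"name": "inference"},
        "backend": {
            "disable_grad": "${is_inference:${benchmark.name}}",
            "eval_mode": "${is_inference:${benchmark.name}}",
        },
    })
    OmegaConf.resolve(cfg)           # replaces interpolations in place
    print(cfg.backend.disable_grad)  # -> True, matching hydra_config.yaml
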
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_11:52:25_dc0c102954ff1f6bcb47de85afea5edc81fc8c7f/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-29_11:52:25_dc0c102954ff1f6bcb47de85afea5edc81fc8c7f/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-29_11:52:25_dc0c102954ff1f6bcb47de85afea5edc81fc8c7f/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_11:52:25_dc0c102954ff1f6bcb47de85afea5edc81fc8c7f/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-29_11:52:25_dc0c102954ff1f6bcb47de85afea5edc81fc8c7f/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-29_11:52:25_dc0c102954ff1f6bcb47de85afea5edc81fc8c7f/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_11:52:25_dc0c102954ff1f6bcb47de85afea5edc81fc8c7f/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
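
The hydra.yaml files explain the numbered subdirectories: with mode: MULTIRUN, the basic sweeper expands hydra.sweeper.params into one job per value combination, so benchmark.input_shapes.batch_size: 1,4 in the bert experiments yields jobs 0 and 1, while the gpt2 experiments have params: null and a single job 0. A rough sketch of that expansion, assuming plain comma-separated value lists as above:

    from itertools import product

    # hydra.sweeper.params as recorded for the bert experiments.
    sweep_params = {"benchmark.input_shapes.batch_size": "1,4"}

    choices = [[f"{key}={v}" for v in values.split(",")]
               for key, values in sweep_params.items()]
    for num, overrides in enumerate(product(*choices)):
        # num plays the role of hydra.job.num (the sweep subdir); the override
        # list is what lands in each job's .config/overrides.yaml.
        print(num, list(overrides))
    # 0 ['benchmark.input_shapes.batch_size=1']
    # 1 ['benchmark.input_shapes.batch_size=4']
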
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_11:52:25_dc0c102954ff1f6bcb47de85afea5edc81fc8c7f/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-29_11:52:25_dc0c102954ff1f6bcb47de85afea5edc81fc8c7f/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 5946220a532107cf7751a973660eddbbc86a53c7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_11:52:25_dc0c102954ff1f6bcb47de85afea5edc81fc8c7f/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.38111999999995,0.00367,272.0,0.494,202.0 diff --git a/raw_results/2023-08-29_11:52:25_dc0c102954ff1f6bcb47de85afea5edc81fc8c7f/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-29_11:52:25_dc0c102954ff1f6bcb47de85afea5edc81fc8c7f/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index a6a5c7f8648ee533b077ca5725ff201e87f50da4..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_11:52:25_dc0c102954ff1f6bcb47de85afea5edc81fc8c7f/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-29 13:01:16,481][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 13:01:16,482][benchmark][INFO] - + Setting seed(42) -[2023-08-29 13:01:17,899][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-29 13:01:17,900][backend][INFO] - Configuring pytorch backend -[2023-08-29 13:01:17,900][backend][INFO] - + Checking initial device isolation -[2023-08-29 13:01:17,900][backend][INFO] - + Checking contineous device isolation -[2023-08-29 13:01:17,901][pytorch][INFO] - + Disabling gradients -[2023-08-29 13:01:17,901][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 13:01:18,571][pytorch][INFO] - + Turning on eval mode -[2023-08-29 13:01:18,572][inference][INFO] - Running inference benchmark -[2023-08-29 13:01:18,782][inference][INFO] - + Tracking forward pass peak memory -[2023-08-29 13:01:18,833][inference][INFO] - + Forward pass peak memory: 469.38111999999995 (MB) -[2023-08-29 13:01:18,835][inference][INFO] - + Warming up the 
forward pass -[2023-08-29 13:01:18,883][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 13:01:23,931][inference][INFO] - + Forward pass latency: 3.67e-03 (s) -[2023-08-29 13:01:23,933][inference][INFO] - + Forward pass throughput: 272.00 (samples/s) -[2023-08-29 13:01:23,934][inference][INFO] - + Warming up the generation pass -[2023-08-29 13:01:24,438][inference][INFO] - + Tracking generation latency and throughput -[2023-08-29 13:01:29,874][inference][INFO] - + Generation pass latency: 4.94e-01 (s) -[2023-08-29 13:01:29,875][inference][INFO] - + Generation pass throughput: 202.00 (tokens/s) -[2023-08-29 13:01:29,875][inference][INFO] - Saving inference results -[2023-08-29 13:01:29,886][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_12:11:48_3dd030d264915c71a0bdd23838dbb27156f44ed1/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-29_12:11:48_3dd030d264915c71a0bdd23838dbb27156f44ed1/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_12:11:48_3dd030d264915c71a0bdd23838dbb27156f44ed1/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_12:11:48_3dd030d264915c71a0bdd23838dbb27156f44ed1/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-29_12:11:48_3dd030d264915c71a0bdd23838dbb27156f44ed1/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 1432c1ff2c278d3880bd1b508dca62544016d3a9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_12:11:48_3dd030d264915c71a0bdd23838dbb27156f44ed1/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_12:11:48_3dd030d264915c71a0bdd23838dbb27156f44ed1/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-29_12:11:48_3dd030d264915c71a0bdd23838dbb27156f44ed1/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-29_12:11:48_3dd030d264915c71a0bdd23838dbb27156f44ed1/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_12:11:48_3dd030d264915c71a0bdd23838dbb27156f44ed1/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-29_12:11:48_3dd030d264915c71a0bdd23838dbb27156f44ed1/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-29_12:11:48_3dd030d264915c71a0bdd23838dbb27156f44ed1/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_12:11:48_3dd030d264915c71a0bdd23838dbb27156f44ed1/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_12:11:48_3dd030d264915c71a0bdd23838dbb27156f44ed1/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-29_12:11:48_3dd030d264915c71a0bdd23838dbb27156f44ed1/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 4aef760e89fafa817842abfc247eaf7d34a82fe3..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_12:11:48_3dd030d264915c71a0bdd23838dbb27156f44ed1/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.522112,0.00332,301.0 diff --git a/raw_results/2023-08-29_12:11:48_3dd030d264915c71a0bdd23838dbb27156f44ed1/pytorch_bert_inference/0/main.log b/raw_results/2023-08-29_12:11:48_3dd030d264915c71a0bdd23838dbb27156f44ed1/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
0f566f43fac26094b63b3437971d943c1bb8d19b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_12:11:48_3dd030d264915c71a0bdd23838dbb27156f44ed1/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-29 13:02:36,415][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 13:02:36,416][benchmark][INFO] - + Setting seed(42) -[2023-08-29 13:02:37,632][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-29 13:02:37,632][backend][INFO] - Configuring pytorch backend -[2023-08-29 13:02:37,633][backend][INFO] - + Checking initial device isolation -[2023-08-29 13:02:37,633][backend][INFO] - + Checking contineous device isolation -[2023-08-29 13:02:37,633][pytorch][INFO] - + Disabling gradients -[2023-08-29 13:02:37,633][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 13:02:38,220][pytorch][INFO] - + Turning on eval mode -[2023-08-29 13:02:38,221][inference][INFO] - Running inference benchmark -[2023-08-29 13:02:38,340][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 13:02:38,342][inference][INFO] - + Tracking forward pass peak memory -[2023-08-29 13:02:38,394][inference][INFO] - + Forward pass peak memory: 466.522112 (MB) -[2023-08-29 13:02:38,396][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 13:02:38,397][inference][INFO] - + Warming up the forward pass -[2023-08-29 13:02:38,428][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 13:02:43,476][inference][INFO] - + Forward pass latency: 3.32e-03 (s) -[2023-08-29 13:02:43,477][inference][INFO] - + Forward pass throughput: 301.00 (samples/s) -[2023-08-29 13:02:43,477][inference][INFO] - Saving inference results -[2023-08-29 13:02:43,486][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_12:11:48_3dd030d264915c71a0bdd23838dbb27156f44ed1/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-29_12:11:48_3dd030d264915c71a0bdd23838dbb27156f44ed1/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_12:11:48_3dd030d264915c71a0bdd23838dbb27156f44ed1/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
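
One sanity check the deleted CSVs permit: forward.throughput(samples/s) is, up to rounding, just batch_size / forward.latency(s) — for the batch-1 bert run above, 1 / 0.00332 ≈ 301 samples/s, exactly the value recorded. A tiny sketch (the helper name is ours, not the library's):

    def forward_throughput(batch_size: int, latency_s: float) -> float:
        """Samples per second implied by a mean forward-pass latency."""
        return batch_size / latency_s

    # Values taken from inference_results.csv files in this diff.
    assert round(forward_throughput(1, 0.00332)) == 301  # bert, batch_size=1
    assert round(forward_throughput(4, 0.00419)) == 955  # bert, batch_size=4
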
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_12:11:48_3dd030d264915c71a0bdd23838dbb27156f44ed1/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-29_12:11:48_3dd030d264915c71a0bdd23838dbb27156f44ed1/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index a92f5b3c526759a58b61259eee5e0128e8054c0f..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_12:11:48_3dd030d264915c71a0bdd23838dbb27156f44ed1/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
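
Every benchmark config here sets memory: true, which is why each main.log reports a Forward pass peak memory figure (in decimal MB, e.g. 467.079168 MB = 467,079,168 bytes) before any timing starts. The diff does not show how the peak is obtained; one plausible CPU-side sketch, sampling the process RSS with psutil (an assumption, not necessarily the library's mechanism):

    import threading
    import time

    import psutil


    def track_peak_rss_mb(fn, interval_s: float = 0.01) -> float:
        """Run fn() while sampling this process's RSS; return the peak in MB."""
        process = psutil.Process()
        peak = 0
        done = threading.Event()

        def sampler():
            nonlocal peak
            while not done.is_set():
                peak = max(peak, process.memory_info().rss)
                time.sleep(interval_s)

        thread = threading.Thread(target=sampler, daemon=True)
        thread.start()
        try:
            fn()
        finally:
            done.set()
            thread.join()
        return peak / 1e6  # decimal MB, the unit used in the logs above
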
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_12:11:48_3dd030d264915c71a0bdd23838dbb27156f44ed1/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-29_12:11:48_3dd030d264915c71a0bdd23838dbb27156f44ed1/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-29_12:11:48_3dd030d264915c71a0bdd23838dbb27156f44ed1/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_12:11:48_3dd030d264915c71a0bdd23838dbb27156f44ed1/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-29_12:11:48_3dd030d264915c71a0bdd23838dbb27156f44ed1/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-29_12:11:48_3dd030d264915c71a0bdd23838dbb27156f44ed1/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_12:11:48_3dd030d264915c71a0bdd23838dbb27156f44ed1/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
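
The log timestamps also expose the protocol spelled out by warmup_runs: 10 and benchmark_duration: 5: after the warmup passes, latency tracking runs for roughly five seconds of wall clock (in the log that follows, tracking starts at 13:02:44,601 and the result lands at 13:02:49,639), and the reported latency is an aggregate over however many passes fit in that window. A hedged reconstruction of such a duration-based loop (ours, sketched with a mean, not the library's code):

    import time


    def measure_latency(fn, benchmark_duration: float = 5.0, warmup_runs: int = 10) -> float:
        """Mean per-call latency of fn() over a fixed wall-clock budget."""
        for _ in range(warmup_runs):
            fn()
        latencies = []
        start = time.perf_counter()
        while time.perf_counter() - start < benchmark_duration:
            t0 = time.perf_counter()
            fn()
            latencies.append(time.perf_counter() - t0)
        return sum(latencies) / len(latencies)
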
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_12:11:48_3dd030d264915c71a0bdd23838dbb27156f44ed1/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-29_12:11:48_3dd030d264915c71a0bdd23838dbb27156f44ed1/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index bcb83ac880205c72c124f2c966f9c78c78c82fb6..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_12:11:48_3dd030d264915c71a0bdd23838dbb27156f44ed1/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.47648,0.00486,823.0 diff --git a/raw_results/2023-08-29_12:11:48_3dd030d264915c71a0bdd23838dbb27156f44ed1/pytorch_bert_inference/1/main.log b/raw_results/2023-08-29_12:11:48_3dd030d264915c71a0bdd23838dbb27156f44ed1/pytorch_bert_inference/1/main.log deleted file mode 100644 index 12de37dee1121d6035d82f37e468e19feabff2c0..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_12:11:48_3dd030d264915c71a0bdd23838dbb27156f44ed1/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-29 13:02:43,852][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 13:02:43,853][benchmark][INFO] - + Setting seed(42) -[2023-08-29 13:02:44,265][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-29 13:02:44,265][backend][INFO] - Configuring pytorch backend -[2023-08-29 13:02:44,266][backend][INFO] - + Checking initial device isolation -[2023-08-29 13:02:44,266][backend][INFO] - + Checking contineous device isolation -[2023-08-29 13:02:44,266][pytorch][INFO] - + Disabling gradients -[2023-08-29 13:02:44,266][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 13:02:44,385][pytorch][INFO] - + Turning on eval mode -[2023-08-29 13:02:44,386][inference][INFO] - Running inference benchmark -[2023-08-29 13:02:44,510][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 13:02:44,512][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-29 13:02:44,551][inference][INFO] - + Forward pass peak memory: 467.47648 (MB) -[2023-08-29 13:02:44,552][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 13:02:44,553][inference][INFO] - + Warming up the forward pass -[2023-08-29 13:02:44,601][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 13:02:49,639][inference][INFO] - + Forward pass latency: 4.86e-03 (s) -[2023-08-29 13:02:49,640][inference][INFO] - + Forward pass throughput: 823.00 (samples/s) -[2023-08-29 13:02:49,640][inference][INFO] - Saving inference results -[2023-08-29 13:02:49,647][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_12:11:48_3dd030d264915c71a0bdd23838dbb27156f44ed1/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-29_12:11:48_3dd030d264915c71a0bdd23838dbb27156f44ed1/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_12:11:48_3dd030d264915c71a0bdd23838dbb27156f44ed1/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_12:11:48_3dd030d264915c71a0bdd23838dbb27156f44ed1/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-29_12:11:48_3dd030d264915c71a0bdd23838dbb27156f44ed1/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 5800875aeb7a0eb5f4b562b65a7f887c7d95c429..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_12:11:48_3dd030d264915c71a0bdd23838dbb27156f44ed1/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_12:11:48_3dd030d264915c71a0bdd23838dbb27156f44ed1/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-29_12:11:48_3dd030d264915c71a0bdd23838dbb27156f44ed1/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-29_12:11:48_3dd030d264915c71a0bdd23838dbb27156f44ed1/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_12:11:48_3dd030d264915c71a0bdd23838dbb27156f44ed1/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-29_12:11:48_3dd030d264915c71a0bdd23838dbb27156f44ed1/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-29_12:11:48_3dd030d264915c71a0bdd23838dbb27156f44ed1/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_12:11:48_3dd030d264915c71a0bdd23838dbb27156f44ed1/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_12:11:48_3dd030d264915c71a0bdd23838dbb27156f44ed1/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-29_12:11:48_3dd030d264915c71a0bdd23838dbb27156f44ed1/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 16d2530ce3c3aa9692ba397956d8c7d7d5eb2d7f..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_12:11:48_3dd030d264915c71a0bdd23838dbb27156f44ed1/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.774336,0.00411,243.0,0.541,185.0 diff --git a/raw_results/2023-08-29_12:11:48_3dd030d264915c71a0bdd23838dbb27156f44ed1/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-29_12:11:48_3dd030d264915c71a0bdd23838dbb27156f44ed1/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index bb61957dbf0d31e96b21426ad28203c556ed8018..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-29_12:11:48_3dd030d264915c71a0bdd23838dbb27156f44ed1/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-29 13:02:54,533][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 13:02:54,534][benchmark][INFO] - + Setting seed(42) -[2023-08-29 13:02:55,944][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-29 13:02:55,945][backend][INFO] - Configuring pytorch backend -[2023-08-29 13:02:55,945][backend][INFO] - + Checking initial device isolation -[2023-08-29 13:02:55,945][backend][INFO] - + Checking contineous device isolation -[2023-08-29 13:02:55,945][pytorch][INFO] - + Disabling gradients -[2023-08-29 13:02:55,945][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 13:02:56,620][pytorch][INFO] - + Turning on eval mode -[2023-08-29 13:02:56,621][inference][INFO] - Running inference benchmark -[2023-08-29 13:02:56,859][inference][INFO] - + Tracking forward pass peak memory -[2023-08-29 13:02:56,909][inference][INFO] - + Forward pass peak memory: 469.774336 (MB) -[2023-08-29 13:02:56,911][inference][INFO] - + Warming up the forward pass -[2023-08-29 13:02:56,952][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 13:03:01,995][inference][INFO] - + Forward pass latency: 4.11e-03 (s) -[2023-08-29 13:03:01,996][inference][INFO] - + Forward pass throughput: 243.00 (samples/s) -[2023-08-29 13:03:01,997][inference][INFO] - + Warming up the generation pass -[2023-08-29 13:03:02,590][inference][INFO] - + Tracking generation latency and throughput -[2023-08-29 13:03:08,005][inference][INFO] - + Generation pass latency: 5.41e-01 (s) -[2023-08-29 13:03:08,006][inference][INFO] - + Generation pass throughput: 185.00 (tokens/s) -[2023-08-29 13:03:08,007][inference][INFO] - Saving inference results -[2023-08-29 13:03:08,017][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_13:02:57_9525515cd40ab2632cf40e1a9d21f7751b02eceb/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-29_13:02:57_9525515cd40ab2632cf40e1a9d21f7751b02eceb/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:02:57_9525515cd40ab2632cf40e1a9d21f7751b02eceb/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
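
For the text-generation experiments the CSVs carry two extra columns, and the same rounding identity holds for decoding: generate.throughput(tokens/s) ≈ batch_size × new_tokens / generate.latency(s). With new_tokens: 100 from the config, the run above gives 1 × 100 / 0.541 ≈ 185 tokens/s, as recorded. As a sketch (helper name ours):

    def generate_throughput(batch_size: int, new_tokens: int, latency_s: float) -> float:
        """Decoded tokens per second implied by one generation-pass latency."""
        return batch_size * new_tokens / latency_s

    # Values from the gpt2 inference_results.csv files in this diff.
    assert round(generate_throughput(1, 100, 0.541)) == 185
    assert round(generate_throughput(1, 100, 0.494)) == 202
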
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_13:02:57_9525515cd40ab2632cf40e1a9d21f7751b02eceb/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-29_13:02:57_9525515cd40ab2632cf40e1a9d21f7751b02eceb/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 99be71f95ccf013d1faf0cf03cc7bb073694c38e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:02:57_9525515cd40ab2632cf40e1a9d21f7751b02eceb/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
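
The sweeper block recorded in this hydra.yaml (params: benchmark.input_shapes.batch_size: 1,4) is what fans the bert experiment out into the numbered job directories 0/ and 1/ seen throughout this tree. A minimal sketch of how such a multirun might have been launched, assuming a main.py entry point (the job name here is "main") and the configs/ directory listed under config_sources; both names are assumptions, not confirmed by this diff:

import subprocess

# Hypothetical reconstruction of the recorded multirun. "-m" puts Hydra in
# MULTIRUN mode, and the comma in the batch_size override expands into one
# job per value, numbered hydra.job.num = 0 and 1.
subprocess.run(
    [
        "python", "main.py", "-m",
        "--config-dir", "configs",
        "--config-name", "bert_cpu_inference",
        "benchmark.input_shapes.batch_size=1,4",
    ],
    check=True,
)

Each job then writes into sweeps/<date>_<sha>/<experiment_name>/<hydra.job.num>, which is exactly the subdir pattern configured at the top of this file.
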
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_13:02:57_9525515cd40ab2632cf40e1a9d21f7751b02eceb/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-29_13:02:57_9525515cd40ab2632cf40e1a9d21f7751b02eceb/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-29_13:02:57_9525515cd40ab2632cf40e1a9d21f7751b02eceb/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:02:57_9525515cd40ab2632cf40e1a9d21f7751b02eceb/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-29_13:02:57_9525515cd40ab2632cf40e1a9d21f7751b02eceb/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-29_13:02:57_9525515cd40ab2632cf40e1a9d21f7751b02eceb/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:02:57_9525515cd40ab2632cf40e1a9d21f7751b02eceb/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_13:02:57_9525515cd40ab2632cf40e1a9d21f7751b02eceb/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-29_13:02:57_9525515cd40ab2632cf40e1a9d21f7751b02eceb/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 31f06ebb47518c85712e17a49f6cbb4a6b089684..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:02:57_9525515cd40ab2632cf40e1a9d21f7751b02eceb/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.989056,0.00304,329.0 diff --git a/raw_results/2023-08-29_13:02:57_9525515cd40ab2632cf40e1a9d21f7751b02eceb/pytorch_bert_inference/0/main.log b/raw_results/2023-08-29_13:02:57_9525515cd40ab2632cf40e1a9d21f7751b02eceb/pytorch_bert_inference/0/main.log deleted file mode 100644 index 43a34a3beecf6d78a3e44f1f91eca55700b02a58..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:02:57_9525515cd40ab2632cf40e1a9d21f7751b02eceb/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-29 14:49:45,997][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 14:49:45,998][benchmark][INFO] - + Setting seed(42) -[2023-08-29 14:49:47,396][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-29 14:49:47,396][backend][INFO] - Configuring pytorch backend -[2023-08-29 14:49:47,396][backend][INFO] - + Checking initial device isolation -[2023-08-29 14:49:47,397][backend][INFO] - + Checking contineous device isolation -[2023-08-29 14:49:47,397][pytorch][INFO] - + Disabling gradients -[2023-08-29 14:49:47,397][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 14:49:48,028][pytorch][INFO] - + Turning on eval mode -[2023-08-29 14:49:48,028][inference][INFO] - Running inference benchmark -[2023-08-29 14:49:48,152][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 14:49:48,153][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-29 14:49:48,213][inference][INFO] - + Forward pass peak memory: 466.989056 (MB) -[2023-08-29 14:49:48,215][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 14:49:48,216][inference][INFO] - + Warming up the forward pass -[2023-08-29 14:49:48,247][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 14:49:53,301][inference][INFO] - + Forward pass latency: 3.04e-03 (s) -[2023-08-29 14:49:53,303][inference][INFO] - + Forward pass throughput: 329.00 (samples/s) -[2023-08-29 14:49:53,303][inference][INFO] - Saving inference results -[2023-08-29 14:49:53,316][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_13:02:57_9525515cd40ab2632cf40e1a9d21f7751b02eceb/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-29_13:02:57_9525515cd40ab2632cf40e1a9d21f7751b02eceb/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:02:57_9525515cd40ab2632cf40e1a9d21f7751b02eceb/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_13:02:57_9525515cd40ab2632cf40e1a9d21f7751b02eceb/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-29_13:02:57_9525515cd40ab2632cf40e1a9d21f7751b02eceb/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 9741666f1533e1ec2a8592ff60cd2137e8e63a7f..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:02:57_9525515cd40ab2632cf40e1a9d21f7751b02eceb/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_13:02:57_9525515cd40ab2632cf40e1a9d21f7751b02eceb/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-29_13:02:57_9525515cd40ab2632cf40e1a9d21f7751b02eceb/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-08-29_13:02:57_9525515cd40ab2632cf40e1a9d21f7751b02eceb/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:02:57_9525515cd40ab2632cf40e1a9d21f7751b02eceb/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-29_13:02:57_9525515cd40ab2632cf40e1a9d21f7751b02eceb/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-29_13:02:57_9525515cd40ab2632cf40e1a9d21f7751b02eceb/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:02:57_9525515cd40ab2632cf40e1a9d21f7751b02eceb/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_13:02:57_9525515cd40ab2632cf40e1a9d21f7751b02eceb/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-29_13:02:57_9525515cd40ab2632cf40e1a9d21f7751b02eceb/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index f8ece0482cdb37f4b7fe063304d6a69851a30989..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:02:57_9525515cd40ab2632cf40e1a9d21f7751b02eceb/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.045824,0.00353,1130.0 diff --git a/raw_results/2023-08-29_13:02:57_9525515cd40ab2632cf40e1a9d21f7751b02eceb/pytorch_bert_inference/1/main.log b/raw_results/2023-08-29_13:02:57_9525515cd40ab2632cf40e1a9d21f7751b02eceb/pytorch_bert_inference/1/main.log deleted file mode 100644 index 7203216f220f1e4c24619e80ce66f22dc9264432..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-29_13:02:57_9525515cd40ab2632cf40e1a9d21f7751b02eceb/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-29 14:49:53,689][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 14:49:53,690][benchmark][INFO] - + Setting seed(42) -[2023-08-29 14:49:54,210][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-29 14:49:54,210][backend][INFO] - Configuring pytorch backend -[2023-08-29 14:49:54,210][backend][INFO] - + Checking initial device isolation -[2023-08-29 14:49:54,210][backend][INFO] - + Checking contineous device isolation -[2023-08-29 14:49:54,210][pytorch][INFO] - + Disabling gradients -[2023-08-29 14:49:54,211][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 14:49:54,428][pytorch][INFO] - + Turning on eval mode -[2023-08-29 14:49:54,429][inference][INFO] - Running inference benchmark -[2023-08-29 14:49:54,553][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 14:49:54,554][inference][INFO] - + Tracking forward pass peak memory -[2023-08-29 14:49:54,597][inference][INFO] - + Forward pass peak memory: 468.045824 (MB) -[2023-08-29 14:49:54,598][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 14:49:54,599][inference][INFO] - + Warming up the forward pass -[2023-08-29 14:49:54,641][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 14:49:59,685][inference][INFO] - + Forward pass latency: 3.53e-03 (s) -[2023-08-29 14:49:59,686][inference][INFO] - + Forward pass throughput: 1130.00 (samples/s) -[2023-08-29 14:49:59,686][inference][INFO] - Saving inference results -[2023-08-29 14:49:59,693][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_13:02:57_9525515cd40ab2632cf40e1a9d21f7751b02eceb/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-29_13:02:57_9525515cd40ab2632cf40e1a9d21f7751b02eceb/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:02:57_9525515cd40ab2632cf40e1a9d21f7751b02eceb/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: 
hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_13:02:57_9525515cd40ab2632cf40e1a9d21f7751b02eceb/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-29_13:02:57_9525515cd40ab2632cf40e1a9d21f7751b02eceb/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 12bb43fd79c3be82157665ba367df495b8422c86..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:02:57_9525515cd40ab2632cf40e1a9d21f7751b02eceb/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
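
Across the gpt2 runs in this section, the generate.throughput(tokens/s) column is consistent with batch_size * new_tokens / generate.latency, rounded to the nearest token: with new_tokens: 100 and batch_size: 1, a 0.482 s generation pass gives ~207 tokens/s and a 0.541 s pass gives ~185 tokens/s, matching the CSVs. A worked check (the formula is inferred from the reported digits, not taken from the tool's source):

def generate_throughput(batch_size: int, new_tokens: int, latency_s: float) -> float:
    # Tokens decoded per second across the whole batch for one generation pass.
    return batch_size * new_tokens / latency_s

# Both gpt2 rows in this section round to the reported values:
assert round(generate_throughput(1, 100, 0.482)) == 207  # reported 207.0 tokens/s
assert round(generate_throughput(1, 100, 0.541)) == 185  # reported 185.0 tokens/s
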
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_13:02:57_9525515cd40ab2632cf40e1a9d21f7751b02eceb/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-29_13:02:57_9525515cd40ab2632cf40e1a9d21f7751b02eceb/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-29_13:02:57_9525515cd40ab2632cf40e1a9d21f7751b02eceb/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:02:57_9525515cd40ab2632cf40e1a9d21f7751b02eceb/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-29_13:02:57_9525515cd40ab2632cf40e1a9d21f7751b02eceb/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-29_13:02:57_9525515cd40ab2632cf40e1a9d21f7751b02eceb/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:02:57_9525515cd40ab2632cf40e1a9d21f7751b02eceb/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_13:02:57_9525515cd40ab2632cf40e1a9d21f7751b02eceb/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-29_13:02:57_9525515cd40ab2632cf40e1a9d21f7751b02eceb/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index cbeced014e81d521b028671fb91fc26961de3c47..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:02:57_9525515cd40ab2632cf40e1a9d21f7751b02eceb/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.38931199999996,0.00319,313.0,0.482,207.0 diff --git a/raw_results/2023-08-29_13:02:57_9525515cd40ab2632cf40e1a9d21f7751b02eceb/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-29_13:02:57_9525515cd40ab2632cf40e1a9d21f7751b02eceb/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index ad27f781edd48ab42c27cd493ff60056773e5dab..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:02:57_9525515cd40ab2632cf40e1a9d21f7751b02eceb/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-29 14:50:05,146][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 14:50:05,147][benchmark][INFO] - + Setting seed(42) -[2023-08-29 14:50:06,696][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-29 14:50:06,696][backend][INFO] - Configuring pytorch backend -[2023-08-29 14:50:06,697][backend][INFO] - + Checking initial device isolation -[2023-08-29 14:50:06,697][backend][INFO] - + Checking contineous device isolation -[2023-08-29 14:50:06,697][pytorch][INFO] - + Disabling gradients -[2023-08-29 14:50:06,697][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 14:50:07,426][pytorch][INFO] - + Turning on eval mode -[2023-08-29 14:50:07,426][inference][INFO] - Running inference benchmark -[2023-08-29 14:50:07,632][inference][INFO] - + Tracking forward pass peak memory -[2023-08-29 14:50:07,680][inference][INFO] - + Forward pass peak memory: 469.38931199999996 (MB) -[2023-08-29 14:50:07,682][inference][INFO] - + Warming up the 
forward pass -[2023-08-29 14:50:07,718][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 14:50:12,770][inference][INFO] - + Forward pass latency: 3.19e-03 (s) -[2023-08-29 14:50:12,772][inference][INFO] - + Forward pass throughput: 313.00 (samples/s) -[2023-08-29 14:50:12,773][inference][INFO] - + Warming up the generation pass -[2023-08-29 14:50:13,288][inference][INFO] - + Tracking generation latency and throughput -[2023-08-29 14:50:18,595][inference][INFO] - + Generation pass latency: 4.82e-01 (s) -[2023-08-29 14:50:18,596][inference][INFO] - + Generation pass throughput: 207.00 (tokens/s) -[2023-08-29 14:50:18,596][inference][INFO] - Saving inference results -[2023-08-29 14:50:18,608][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_13:08:14_5b5ee235f3239413e9614bd02032b1a203dab710/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-29_13:08:14_5b5ee235f3239413e9614bd02032b1a203dab710/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:08:14_5b5ee235f3239413e9614bd02032b1a203dab710/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_13:08:14_5b5ee235f3239413e9614bd02032b1a203dab710/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-29_13:08:14_5b5ee235f3239413e9614bd02032b1a203dab710/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 7595a8697c4fffe370fa88ad13bc929d14e605c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:08:14_5b5ee235f3239413e9614bd02032b1a203dab710/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_13:08:14_5b5ee235f3239413e9614bd02032b1a203dab710/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-29_13:08:14_5b5ee235f3239413e9614bd02032b1a203dab710/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-29_13:08:14_5b5ee235f3239413e9614bd02032b1a203dab710/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:08:14_5b5ee235f3239413e9614bd02032b1a203dab710/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-29_13:08:14_5b5ee235f3239413e9614bd02032b1a203dab710/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-29_13:08:14_5b5ee235f3239413e9614bd02032b1a203dab710/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:08:14_5b5ee235f3239413e9614bd02032b1a203dab710/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_13:08:14_5b5ee235f3239413e9614bd02032b1a203dab710/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-29_13:08:14_5b5ee235f3239413e9614bd02032b1a203dab710/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index a6021a738cf7062ed41d47d6a2967fd6c3357f09..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:08:14_5b5ee235f3239413e9614bd02032b1a203dab710/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.419712,0.00322,311.0 diff --git a/raw_results/2023-08-29_13:08:14_5b5ee235f3239413e9614bd02032b1a203dab710/pytorch_bert_inference/0/main.log b/raw_results/2023-08-29_13:08:14_5b5ee235f3239413e9614bd02032b1a203dab710/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
c32db95e8875ef18ea204afe7891fcd0d33b680e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:08:14_5b5ee235f3239413e9614bd02032b1a203dab710/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-29 14:51:28,475][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 14:51:28,476][benchmark][INFO] - + Setting seed(42) -[2023-08-29 14:51:29,747][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-29 14:51:29,747][backend][INFO] - Configuring pytorch backend -[2023-08-29 14:51:29,747][backend][INFO] - + Checking initial device isolation -[2023-08-29 14:51:29,747][backend][INFO] - + Checking contineous device isolation -[2023-08-29 14:51:29,748][pytorch][INFO] - + Disabling gradients -[2023-08-29 14:51:29,748][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 14:51:30,409][pytorch][INFO] - + Turning on eval mode -[2023-08-29 14:51:30,410][inference][INFO] - Running inference benchmark -[2023-08-29 14:51:30,527][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 14:51:30,528][inference][INFO] - + Tracking forward pass peak memory -[2023-08-29 14:51:30,588][inference][INFO] - + Forward pass peak memory: 466.419712 (MB) -[2023-08-29 14:51:30,589][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 14:51:30,590][inference][INFO] - + Warming up the forward pass -[2023-08-29 14:51:30,627][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 14:51:35,678][inference][INFO] - + Forward pass latency: 3.22e-03 (s) -[2023-08-29 14:51:35,680][inference][INFO] - + Forward pass throughput: 311.00 (samples/s) -[2023-08-29 14:51:35,680][inference][INFO] - Saving inference results -[2023-08-29 14:51:35,693][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_13:08:14_5b5ee235f3239413e9614bd02032b1a203dab710/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-29_13:08:14_5b5ee235f3239413e9614bd02032b1a203dab710/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:08:14_5b5ee235f3239413e9614bd02032b1a203dab710/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_13:08:14_5b5ee235f3239413e9614bd02032b1a203dab710/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-29_13:08:14_5b5ee235f3239413e9614bd02032b1a203dab710/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 1aee240d10fb9bc28642e7ddd9a8d6819c6869f9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:08:14_5b5ee235f3239413e9614bd02032b1a203dab710/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
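
The same reconstruction works for the forward pass: forward.throughput(samples/s) matches batch_size / forward.latency, with both columns apparently rounded to three significant figures (an inference from the reported digits, not a documented rule). That is why the batch-4 bert rows report 1130.0 samples/s at a 3.53e-03 s latency while the batch-1 rows land around 311-329. A sketch:

from math import floor, log10

def sig3(x: float) -> float:
    # Round to three significant figures, matching the digits in the CSVs.
    return round(x, 2 - floor(log10(abs(x))))

def forward_throughput(batch_size: int, latency_s: float) -> float:
    return sig3(batch_size / latency_s)

assert forward_throughput(4, 0.00353) == 1130.0  # batch_size=4 rows in this diff
assert forward_throughput(1, 0.00322) == 311.0   # batch_size=1 rows
assert forward_throughput(1, 0.00304) == 329.0
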
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_13:08:14_5b5ee235f3239413e9614bd02032b1a203dab710/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-29_13:08:14_5b5ee235f3239413e9614bd02032b1a203dab710/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-29_13:08:14_5b5ee235f3239413e9614bd02032b1a203dab710/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:08:14_5b5ee235f3239413e9614bd02032b1a203dab710/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-29_13:08:14_5b5ee235f3239413e9614bd02032b1a203dab710/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-29_13:08:14_5b5ee235f3239413e9614bd02032b1a203dab710/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:08:14_5b5ee235f3239413e9614bd02032b1a203dab710/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_13:08:14_5b5ee235f3239413e9614bd02032b1a203dab710/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-29_13:08:14_5b5ee235f3239413e9614bd02032b1a203dab710/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 28b7d9c6a090888e111aad0a4d79b28ce3b176a7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:08:14_5b5ee235f3239413e9614bd02032b1a203dab710/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.40275199999996,0.00353,1130.0 diff --git a/raw_results/2023-08-29_13:08:14_5b5ee235f3239413e9614bd02032b1a203dab710/pytorch_bert_inference/1/main.log b/raw_results/2023-08-29_13:08:14_5b5ee235f3239413e9614bd02032b1a203dab710/pytorch_bert_inference/1/main.log deleted file mode 100644 index e1da28dd58a73e9d395544c8d5ce750a5c9c4423..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:08:14_5b5ee235f3239413e9614bd02032b1a203dab710/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-29 14:51:36,090][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 14:51:36,091][benchmark][INFO] - + Setting seed(42) -[2023-08-29 14:51:36,533][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-29 14:51:36,534][backend][INFO] - Configuring pytorch backend -[2023-08-29 14:51:36,534][backend][INFO] - + Checking initial device isolation -[2023-08-29 14:51:36,534][backend][INFO] - + Checking contineous device isolation -[2023-08-29 14:51:36,534][pytorch][INFO] - + Disabling gradients -[2023-08-29 14:51:36,535][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 14:51:36,655][pytorch][INFO] - + Turning on eval mode -[2023-08-29 14:51:36,656][inference][INFO] - Running inference benchmark -[2023-08-29 14:51:36,782][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 14:51:36,783][inference][INFO] - + Tracking forward 
pass peak memory -[2023-08-29 14:51:36,826][inference][INFO] - + Forward pass peak memory: 467.40275199999996 (MB) -[2023-08-29 14:51:36,827][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 14:51:36,829][inference][INFO] - + Warming up the forward pass -[2023-08-29 14:51:36,865][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 14:51:41,913][inference][INFO] - + Forward pass latency: 3.53e-03 (s) -[2023-08-29 14:51:41,914][inference][INFO] - + Forward pass throughput: 1130.00 (samples/s) -[2023-08-29 14:51:41,914][inference][INFO] - Saving inference results -[2023-08-29 14:51:41,923][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_13:08:14_5b5ee235f3239413e9614bd02032b1a203dab710/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-29_13:08:14_5b5ee235f3239413e9614bd02032b1a203dab710/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:08:14_5b5ee235f3239413e9614bd02032b1a203dab710/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_13:08:14_5b5ee235f3239413e9614bd02032b1a203dab710/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-29_13:08:14_5b5ee235f3239413e9614bd02032b1a203dab710/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 15f4129e29b9510ccae88d6abaa542573e9d5dfa..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:08:14_5b5ee235f3239413e9614bd02032b1a203dab710/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - 
launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_13:08:14_5b5ee235f3239413e9614bd02032b1a203dab710/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-29_13:08:14_5b5ee235f3239413e9614bd02032b1a203dab710/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-29_13:08:14_5b5ee235f3239413e9614bd02032b1a203dab710/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:08:14_5b5ee235f3239413e9614bd02032b1a203dab710/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-29_13:08:14_5b5ee235f3239413e9614bd02032b1a203dab710/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-29_13:08:14_5b5ee235f3239413e9614bd02032b1a203dab710/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:08:14_5b5ee235f3239413e9614bd02032b1a203dab710/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_13:08:14_5b5ee235f3239413e9614bd02032b1a203dab710/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-29_13:08:14_5b5ee235f3239413e9614bd02032b1a203dab710/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index b4fb0f065afab374492ab24a900be735b5d17b04..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:08:14_5b5ee235f3239413e9614bd02032b1a203dab710/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.09849599999995,0.00384,260.0,0.557,180.0 diff --git a/raw_results/2023-08-29_13:08:14_5b5ee235f3239413e9614bd02032b1a203dab710/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-29_13:08:14_5b5ee235f3239413e9614bd02032b1a203dab710/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 771c6f28384616258fe3212b47cb308ee84df2f5..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-29_13:08:14_5b5ee235f3239413e9614bd02032b1a203dab710/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-29 14:51:46,883][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 14:51:46,884][benchmark][INFO] - + Setting seed(42) -[2023-08-29 14:51:48,547][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-29 14:51:48,548][backend][INFO] - Configuring pytorch backend -[2023-08-29 14:51:48,548][backend][INFO] - + Checking initial device isolation -[2023-08-29 14:51:48,548][backend][INFO] - + Checking contineous device isolation -[2023-08-29 14:51:48,548][pytorch][INFO] - + Disabling gradients -[2023-08-29 14:51:48,548][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 14:51:49,195][pytorch][INFO] - + Turning on eval mode -[2023-08-29 14:51:49,195][inference][INFO] - Running inference benchmark -[2023-08-29 14:51:49,385][inference][INFO] - + Tracking forward pass peak memory -[2023-08-29 14:51:49,431][inference][INFO] - + Forward pass peak memory: 469.09849599999995 (MB) -[2023-08-29 14:51:49,433][inference][INFO] - + Warming up the forward pass -[2023-08-29 14:51:49,469][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 14:51:54,513][inference][INFO] - + Forward pass latency: 3.84e-03 (s) -[2023-08-29 14:51:54,514][inference][INFO] - + Forward pass throughput: 260.00 (samples/s) -[2023-08-29 14:51:54,515][inference][INFO] - + Warming up the generation pass -[2023-08-29 14:51:55,017][inference][INFO] - + Tracking generation latency and throughput -[2023-08-29 14:52:00,028][inference][INFO] - + Generation pass latency: 5.57e-01 (s) -[2023-08-29 14:52:00,030][inference][INFO] - + Generation pass throughput: 180.00 (tokens/s) -[2023-08-29 14:52:00,030][inference][INFO] - Saving inference results -[2023-08-29 14:52:00,041][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_13:09:07_2ee60b757e30815529239c87235a2b794fa60286/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-29_13:09:07_2ee60b757e30815529239c87235a2b794fa60286/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:09:07_2ee60b757e30815529239c87235a2b794fa60286/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_13:09:07_2ee60b757e30815529239c87235a2b794fa60286/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-29_13:09:07_2ee60b757e30815529239c87235a2b794fa60286/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 857d34d00f8146546d77a350ecce09151f372473..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:09:07_2ee60b757e30815529239c87235a2b794fa60286/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_13:09:07_2ee60b757e30815529239c87235a2b794fa60286/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-29_13:09:07_2ee60b757e30815529239c87235a2b794fa60286/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-29_13:09:07_2ee60b757e30815529239c87235a2b794fa60286/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:09:07_2ee60b757e30815529239c87235a2b794fa60286/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-29_13:09:07_2ee60b757e30815529239c87235a2b794fa60286/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-29_13:09:07_2ee60b757e30815529239c87235a2b794fa60286/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:09:07_2ee60b757e30815529239c87235a2b794fa60286/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_13:09:07_2ee60b757e30815529239c87235a2b794fa60286/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-29_13:09:07_2ee60b757e30815529239c87235a2b794fa60286/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 66dd7d9bed29ffb6151d59f16b6de32c122b7a01..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:09:07_2ee60b757e30815529239c87235a2b794fa60286/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.075072,0.00308,325.0 diff --git a/raw_results/2023-08-29_13:09:07_2ee60b757e30815529239c87235a2b794fa60286/pytorch_bert_inference/0/main.log b/raw_results/2023-08-29_13:09:07_2ee60b757e30815529239c87235a2b794fa60286/pytorch_bert_inference/0/main.log deleted file mode 100644 index 27eaf0a5b00fb5a98cf4e9c31b8f511489c512ef..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:09:07_2ee60b757e30815529239c87235a2b794fa60286/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-29 14:53:07,498][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 14:53:07,499][benchmark][INFO] - + Setting seed(42) -[2023-08-29 14:53:08,698][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-29 14:53:08,698][backend][INFO] - Configuring pytorch backend -[2023-08-29 14:53:08,698][backend][INFO] - + Checking initial device isolation -[2023-08-29 14:53:08,698][backend][INFO] - + Checking contineous device isolation -[2023-08-29 14:53:08,698][pytorch][INFO] - + Disabling gradients -[2023-08-29 14:53:08,698][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 14:53:09,314][pytorch][INFO] - + Turning on eval mode -[2023-08-29 14:53:09,314][inference][INFO] - Running inference benchmark -[2023-08-29 14:53:09,437][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 14:53:09,439][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-29 14:53:09,498][inference][INFO] - + Forward pass peak memory: 467.075072 (MB) -[2023-08-29 14:53:09,500][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 14:53:09,502][inference][INFO] - + Warming up the forward pass -[2023-08-29 14:53:09,538][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 14:53:14,591][inference][INFO] - + Forward pass latency: 3.08e-03 (s) -[2023-08-29 14:53:14,592][inference][INFO] - + Forward pass throughput: 325.00 (samples/s) -[2023-08-29 14:53:14,593][inference][INFO] - Saving inference results -[2023-08-29 14:53:14,604][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_13:09:07_2ee60b757e30815529239c87235a2b794fa60286/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-29_13:09:07_2ee60b757e30815529239c87235a2b794fa60286/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:09:07_2ee60b757e30815529239c87235a2b794fa60286/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_13:09:07_2ee60b757e30815529239c87235a2b794fa60286/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-29_13:09:07_2ee60b757e30815529239c87235a2b794fa60286/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 9ccfbe6c3930b7662e928f8a65801692c6e2d6bf..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:09:07_2ee60b757e30815529239c87235a2b794fa60286/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_13:09:07_2ee60b757e30815529239c87235a2b794fa60286/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-29_13:09:07_2ee60b757e30815529239c87235a2b794fa60286/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-08-29_13:09:07_2ee60b757e30815529239c87235a2b794fa60286/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:09:07_2ee60b757e30815529239c87235a2b794fa60286/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-29_13:09:07_2ee60b757e30815529239c87235a2b794fa60286/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-29_13:09:07_2ee60b757e30815529239c87235a2b794fa60286/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:09:07_2ee60b757e30815529239c87235a2b794fa60286/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_13:09:07_2ee60b757e30815529239c87235a2b794fa60286/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-29_13:09:07_2ee60b757e30815529239c87235a2b794fa60286/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 01515c120b0300e6f6b39d2bc872002d15d3c962..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:09:07_2ee60b757e30815529239c87235a2b794fa60286/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.13183999999995,0.00338,1180.0 diff --git a/raw_results/2023-08-29_13:09:07_2ee60b757e30815529239c87235a2b794fa60286/pytorch_bert_inference/1/main.log b/raw_results/2023-08-29_13:09:07_2ee60b757e30815529239c87235a2b794fa60286/pytorch_bert_inference/1/main.log deleted file mode 100644 index 4c841f106cb9b74fb3a8d14c3ad6f2f7e19ad578..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-29_13:09:07_2ee60b757e30815529239c87235a2b794fa60286/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-29 14:53:14,974][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 14:53:14,976][benchmark][INFO] - + Setting seed(42) -[2023-08-29 14:53:15,413][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-29 14:53:15,413][backend][INFO] - Configuring pytorch backend -[2023-08-29 14:53:15,413][backend][INFO] - + Checking initial device isolation -[2023-08-29 14:53:15,414][backend][INFO] - + Checking contineous device isolation -[2023-08-29 14:53:15,414][pytorch][INFO] - + Disabling gradients -[2023-08-29 14:53:15,414][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 14:53:15,528][pytorch][INFO] - + Turning on eval mode -[2023-08-29 14:53:15,529][inference][INFO] - Running inference benchmark -[2023-08-29 14:53:15,653][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 14:53:15,655][inference][INFO] - + Tracking forward pass peak memory -[2023-08-29 14:53:15,697][inference][INFO] - + Forward pass peak memory: 468.13183999999995 (MB) -[2023-08-29 14:53:15,698][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 14:53:15,700][inference][INFO] - + Warming up the forward pass -[2023-08-29 14:53:15,735][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 14:53:20,783][inference][INFO] - + Forward pass latency: 3.38e-03 (s) -[2023-08-29 14:53:20,784][inference][INFO] - + Forward pass throughput: 1180.00 (samples/s) -[2023-08-29 14:53:20,784][inference][INFO] - Saving inference results -[2023-08-29 14:53:20,791][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_13:09:07_2ee60b757e30815529239c87235a2b794fa60286/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-29_13:09:07_2ee60b757e30815529239c87235a2b794fa60286/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:09:07_2ee60b757e30815529239c87235a2b794fa60286/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference 
-model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_13:09:07_2ee60b757e30815529239c87235a2b794fa60286/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-29_13:09:07_2ee60b757e30815529239c87235a2b794fa60286/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 77955ffc7c537416d180e8fd6b8441ea90385fe3..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:09:07_2ee60b757e30815529239c87235a2b794fa60286/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_13:09:07_2ee60b757e30815529239c87235a2b794fa60286/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-29_13:09:07_2ee60b757e30815529239c87235a2b794fa60286/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-29_13:09:07_2ee60b757e30815529239c87235a2b794fa60286/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:09:07_2ee60b757e30815529239c87235a2b794fa60286/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-29_13:09:07_2ee60b757e30815529239c87235a2b794fa60286/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-29_13:09:07_2ee60b757e30815529239c87235a2b794fa60286/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:09:07_2ee60b757e30815529239c87235a2b794fa60286/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_13:09:07_2ee60b757e30815529239c87235a2b794fa60286/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-29_13:09:07_2ee60b757e30815529239c87235a2b794fa60286/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 991cab31118b6afda5351eab53b0b2df17becc81..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:09:07_2ee60b757e30815529239c87235a2b794fa60286/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.643264,0.00374,267.0,0.486,206.0 diff --git a/raw_results/2023-08-29_13:09:07_2ee60b757e30815529239c87235a2b794fa60286/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-29_13:09:07_2ee60b757e30815529239c87235a2b794fa60286/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 8b3ac3249f1e08142e9f4393483adeb3b76913c0..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:09:07_2ee60b757e30815529239c87235a2b794fa60286/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-29 14:53:25,632][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 14:53:25,633][benchmark][INFO] - + Setting seed(42) -[2023-08-29 14:53:27,082][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-29 14:53:27,082][backend][INFO] - Configuring pytorch backend -[2023-08-29 14:53:27,082][backend][INFO] - + Checking initial device isolation -[2023-08-29 14:53:27,083][backend][INFO] - + Checking contineous device isolation -[2023-08-29 14:53:27,083][pytorch][INFO] - + Disabling gradients -[2023-08-29 14:53:27,083][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 14:53:27,725][pytorch][INFO] - + Turning on eval mode -[2023-08-29 14:53:27,726][inference][INFO] - Running inference benchmark -[2023-08-29 14:53:27,924][inference][INFO] - + Tracking forward pass peak memory -[2023-08-29 14:53:27,968][inference][INFO] - + Forward pass peak memory: 469.643264 (MB) -[2023-08-29 14:53:27,970][inference][INFO] - + Warming up the forward pass 
-[2023-08-29 14:53:28,007][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 14:53:33,053][inference][INFO] - + Forward pass latency: 3.74e-03 (s) -[2023-08-29 14:53:33,054][inference][INFO] - + Forward pass throughput: 267.00 (samples/s) -[2023-08-29 14:53:33,055][inference][INFO] - + Warming up the generation pass -[2023-08-29 14:53:33,572][inference][INFO] - + Tracking generation latency and throughput -[2023-08-29 14:53:38,922][inference][INFO] - + Generation pass latency: 4.86e-01 (s) -[2023-08-29 14:53:38,923][inference][INFO] - + Generation pass throughput: 206.00 (tokens/s) -[2023-08-29 14:53:38,923][inference][INFO] - Saving inference results -[2023-08-29 14:53:38,934][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_13:10:00_173fa7da9c29c4e3a683ac5d489cde4e7220c98a/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-29_13:10:00_173fa7da9c29c4e3a683ac5d489cde4e7220c98a/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:10:00_173fa7da9c29c4e3a683ac5d489cde4e7220c98a/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_13:10:00_173fa7da9c29c4e3a683ac5d489cde4e7220c98a/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-29_13:10:00_173fa7da9c29c4e3a683ac5d489cde4e7220c98a/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 50ea8292f69ffd01cdd65a4ac0e272b05a7ff6a3..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:10:00_173fa7da9c29c4e3a683ac5d489cde4e7220c98a/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_13:10:00_173fa7da9c29c4e3a683ac5d489cde4e7220c98a/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-29_13:10:00_173fa7da9c29c4e3a683ac5d489cde4e7220c98a/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-29_13:10:00_173fa7da9c29c4e3a683ac5d489cde4e7220c98a/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:10:00_173fa7da9c29c4e3a683ac5d489cde4e7220c98a/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-29_13:10:00_173fa7da9c29c4e3a683ac5d489cde4e7220c98a/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-29_13:10:00_173fa7da9c29c4e3a683ac5d489cde4e7220c98a/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:10:00_173fa7da9c29c4e3a683ac5d489cde4e7220c98a/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_13:10:00_173fa7da9c29c4e3a683ac5d489cde4e7220c98a/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-29_13:10:00_173fa7da9c29c4e3a683ac5d489cde4e7220c98a/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 06c1ca7e3b2f95039e0a716cd58c79de7c2dadda..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:10:00_173fa7da9c29c4e3a683ac5d489cde4e7220c98a/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.567168,0.00383,261.0 diff --git a/raw_results/2023-08-29_13:10:00_173fa7da9c29c4e3a683ac5d489cde4e7220c98a/pytorch_bert_inference/0/main.log b/raw_results/2023-08-29_13:10:00_173fa7da9c29c4e3a683ac5d489cde4e7220c98a/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
5ad801ee056472914ed4d9308af547407de150bc..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:10:00_173fa7da9c29c4e3a683ac5d489cde4e7220c98a/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-29 14:54:47,554][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 14:54:47,555][benchmark][INFO] - + Setting seed(42) -[2023-08-29 14:54:48,860][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-29 14:54:48,860][backend][INFO] - Configuring pytorch backend -[2023-08-29 14:54:48,860][backend][INFO] - + Checking initial device isolation -[2023-08-29 14:54:48,861][backend][INFO] - + Checking contineous device isolation -[2023-08-29 14:54:48,861][pytorch][INFO] - + Disabling gradients -[2023-08-29 14:54:48,861][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 14:54:49,472][pytorch][INFO] - + Turning on eval mode -[2023-08-29 14:54:49,472][inference][INFO] - Running inference benchmark -[2023-08-29 14:54:49,589][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 14:54:49,590][inference][INFO] - + Tracking forward pass peak memory -[2023-08-29 14:54:49,649][inference][INFO] - + Forward pass peak memory: 466.567168 (MB) -[2023-08-29 14:54:49,651][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 14:54:49,652][inference][INFO] - + Warming up the forward pass -[2023-08-29 14:54:49,688][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 14:54:54,733][inference][INFO] - + Forward pass latency: 3.83e-03 (s) -[2023-08-29 14:54:54,735][inference][INFO] - + Forward pass throughput: 261.00 (samples/s) -[2023-08-29 14:54:54,735][inference][INFO] - Saving inference results -[2023-08-29 14:54:54,745][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_13:10:00_173fa7da9c29c4e3a683ac5d489cde4e7220c98a/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-29_13:10:00_173fa7da9c29c4e3a683ac5d489cde4e7220c98a/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:10:00_173fa7da9c29c4e3a683ac5d489cde4e7220c98a/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_13:10:00_173fa7da9c29c4e3a683ac5d489cde4e7220c98a/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-29_13:10:00_173fa7da9c29c4e3a683ac5d489cde4e7220c98a/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 5c111b773ce18ba8a625349263983b62c2994f8a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:10:00_173fa7da9c29c4e3a683ac5d489cde4e7220c98a/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_13:10:00_173fa7da9c29c4e3a683ac5d489cde4e7220c98a/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-29_13:10:00_173fa7da9c29c4e3a683ac5d489cde4e7220c98a/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-29_13:10:00_173fa7da9c29c4e3a683ac5d489cde4e7220c98a/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:10:00_173fa7da9c29c4e3a683ac5d489cde4e7220c98a/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-29_13:10:00_173fa7da9c29c4e3a683ac5d489cde4e7220c98a/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-29_13:10:00_173fa7da9c29c4e3a683ac5d489cde4e7220c98a/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:10:00_173fa7da9c29c4e3a683ac5d489cde4e7220c98a/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_13:10:00_173fa7da9c29c4e3a683ac5d489cde4e7220c98a/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-29_13:10:00_173fa7da9c29c4e3a683ac5d489cde4e7220c98a/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index bdaef984eefb0a16e2e95fdb382e5b3cdf3dc7db..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:10:00_173fa7da9c29c4e3a683ac5d489cde4e7220c98a/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.50924799999996,0.0044,909.0 diff --git a/raw_results/2023-08-29_13:10:00_173fa7da9c29c4e3a683ac5d489cde4e7220c98a/pytorch_bert_inference/1/main.log b/raw_results/2023-08-29_13:10:00_173fa7da9c29c4e3a683ac5d489cde4e7220c98a/pytorch_bert_inference/1/main.log deleted file mode 100644 index b0bca589eb18ed14b231d4785471b3613cf19f62..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:10:00_173fa7da9c29c4e3a683ac5d489cde4e7220c98a/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-29 14:54:55,157][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 14:54:55,158][benchmark][INFO] - + Setting seed(42) -[2023-08-29 14:54:55,722][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-29 14:54:55,723][backend][INFO] - Configuring pytorch backend -[2023-08-29 14:54:55,723][backend][INFO] - + Checking initial device isolation -[2023-08-29 14:54:55,723][backend][INFO] - + Checking contineous device isolation -[2023-08-29 14:54:55,723][pytorch][INFO] - + Disabling gradients -[2023-08-29 14:54:55,724][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 14:54:55,839][pytorch][INFO] - + Turning on eval mode -[2023-08-29 14:54:55,840][inference][INFO] - Running inference benchmark -[2023-08-29 14:54:55,961][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 14:54:55,962][inference][INFO] - + Tracking forward 
pass peak memory -[2023-08-29 14:54:56,000][inference][INFO] - + Forward pass peak memory: 467.50924799999996 (MB) -[2023-08-29 14:54:56,001][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 14:54:56,003][inference][INFO] - + Warming up the forward pass -[2023-08-29 14:54:56,046][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 14:55:01,085][inference][INFO] - + Forward pass latency: 4.40e-03 (s) -[2023-08-29 14:55:01,087][inference][INFO] - + Forward pass throughput: 909.00 (samples/s) -[2023-08-29 14:55:01,087][inference][INFO] - Saving inference results -[2023-08-29 14:55:01,094][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_13:10:00_173fa7da9c29c4e3a683ac5d489cde4e7220c98a/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-29_13:10:00_173fa7da9c29c4e3a683ac5d489cde4e7220c98a/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:10:00_173fa7da9c29c4e3a683ac5d489cde4e7220c98a/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_13:10:00_173fa7da9c29c4e3a683ac5d489cde4e7220c98a/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-29_13:10:00_173fa7da9c29c4e3a683ac5d489cde4e7220c98a/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index b30b84aa529e069ec07c79d307ae5c66f861f0d7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:10:00_173fa7da9c29c4e3a683ac5d489cde4e7220c98a/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - 
launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_13:10:00_173fa7da9c29c4e3a683ac5d489cde4e7220c98a/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-29_13:10:00_173fa7da9c29c4e3a683ac5d489cde4e7220c98a/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-29_13:10:00_173fa7da9c29c4e3a683ac5d489cde4e7220c98a/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:10:00_173fa7da9c29c4e3a683ac5d489cde4e7220c98a/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-29_13:10:00_173fa7da9c29c4e3a683ac5d489cde4e7220c98a/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-29_13:10:00_173fa7da9c29c4e3a683ac5d489cde4e7220c98a/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:10:00_173fa7da9c29c4e3a683ac5d489cde4e7220c98a/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_13:10:00_173fa7da9c29c4e3a683ac5d489cde4e7220c98a/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-29_13:10:00_173fa7da9c29c4e3a683ac5d489cde4e7220c98a/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 620acf77b922d647b89ff42920d94fa966ea16cb..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:10:00_173fa7da9c29c4e3a683ac5d489cde4e7220c98a/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.72108799999995,0.00389,257.0,0.583,172.0 diff --git a/raw_results/2023-08-29_13:10:00_173fa7da9c29c4e3a683ac5d489cde4e7220c98a/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-29_13:10:00_173fa7da9c29c4e3a683ac5d489cde4e7220c98a/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 8b930017f00adbb492b26a36a7979607f17fd209..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-29_13:10:00_173fa7da9c29c4e3a683ac5d489cde4e7220c98a/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-29 14:55:05,923][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 14:55:05,924][benchmark][INFO] - + Setting seed(42) -[2023-08-29 14:55:07,721][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-29 14:55:07,721][backend][INFO] - Configuring pytorch backend -[2023-08-29 14:55:07,721][backend][INFO] - + Checking initial device isolation -[2023-08-29 14:55:07,722][backend][INFO] - + Checking contineous device isolation -[2023-08-29 14:55:07,722][pytorch][INFO] - + Disabling gradients -[2023-08-29 14:55:07,722][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 14:55:08,556][pytorch][INFO] - + Turning on eval mode -[2023-08-29 14:55:08,556][inference][INFO] - Running inference benchmark -[2023-08-29 14:55:08,818][inference][INFO] - + Tracking forward pass peak memory -[2023-08-29 14:55:08,868][inference][INFO] - + Forward pass peak memory: 469.72108799999995 (MB) -[2023-08-29 14:55:08,870][inference][INFO] - + Warming up the forward pass -[2023-08-29 14:55:08,904][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 14:55:13,949][inference][INFO] - + Forward pass latency: 3.89e-03 (s) -[2023-08-29 14:55:13,950][inference][INFO] - + Forward pass throughput: 257.00 (samples/s) -[2023-08-29 14:55:13,951][inference][INFO] - + Warming up the generation pass -[2023-08-29 14:55:14,552][inference][INFO] - + Tracking generation latency and throughput -[2023-08-29 14:55:19,805][inference][INFO] - + Generation pass latency: 5.83e-01 (s) -[2023-08-29 14:55:19,806][inference][INFO] - + Generation pass throughput: 172.00 (tokens/s) -[2023-08-29 14:55:19,806][inference][INFO] - Saving inference results -[2023-08-29 14:55:19,818][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_13:48:51_33aa0af70c70d9a8205b0ff0d1d4e68807fbb173/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-29_13:48:51_33aa0af70c70d9a8205b0ff0d1d4e68807fbb173/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:48:51_33aa0af70c70d9a8205b0ff0d1d4e68807fbb173/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_13:48:51_33aa0af70c70d9a8205b0ff0d1d4e68807fbb173/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-29_13:48:51_33aa0af70c70d9a8205b0ff0d1d4e68807fbb173/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 26e5fc834b843363c49d9b761d7c075c7b1220be..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:48:51_33aa0af70c70d9a8205b0ff0d1d4e68807fbb173/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_13:48:51_33aa0af70c70d9a8205b0ff0d1d4e68807fbb173/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-29_13:48:51_33aa0af70c70d9a8205b0ff0d1d4e68807fbb173/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-29_13:48:51_33aa0af70c70d9a8205b0ff0d1d4e68807fbb173/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:48:51_33aa0af70c70d9a8205b0ff0d1d4e68807fbb173/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-29_13:48:51_33aa0af70c70d9a8205b0ff0d1d4e68807fbb173/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-29_13:48:51_33aa0af70c70d9a8205b0ff0d1d4e68807fbb173/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:48:51_33aa0af70c70d9a8205b0ff0d1d4e68807fbb173/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_13:48:51_33aa0af70c70d9a8205b0ff0d1d4e68807fbb173/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-29_13:48:51_33aa0af70c70d9a8205b0ff0d1d4e68807fbb173/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 2d0a2ed49dc95bcf3e9b41c1008dcb2855f7745f..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:48:51_33aa0af70c70d9a8205b0ff0d1d4e68807fbb173/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.735104,0.0038,263.0 diff --git a/raw_results/2023-08-29_13:48:51_33aa0af70c70d9a8205b0ff0d1d4e68807fbb173/pytorch_bert_inference/0/main.log b/raw_results/2023-08-29_13:48:51_33aa0af70c70d9a8205b0ff0d1d4e68807fbb173/pytorch_bert_inference/0/main.log deleted file mode 100644 index 7cf145f68f40ab47f6a0ef98cdf00d1ad0d7e500..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:48:51_33aa0af70c70d9a8205b0ff0d1d4e68807fbb173/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-29 14:56:29,281][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 14:56:29,282][benchmark][INFO] - + Setting seed(42) -[2023-08-29 14:56:30,520][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-29 14:56:30,520][backend][INFO] - Configuring pytorch backend -[2023-08-29 14:56:30,520][backend][INFO] - + Checking initial device isolation -[2023-08-29 14:56:30,520][backend][INFO] - + Checking contineous device isolation -[2023-08-29 14:56:30,521][pytorch][INFO] - + Disabling gradients -[2023-08-29 14:56:30,521][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 14:56:31,123][pytorch][INFO] - + Turning on eval mode -[2023-08-29 14:56:31,124][inference][INFO] - Running inference benchmark -[2023-08-29 14:56:31,318][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 14:56:31,319][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-29 14:56:31,377][inference][INFO] - + Forward pass peak memory: 466.735104 (MB) -[2023-08-29 14:56:31,378][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 14:56:31,383][inference][INFO] - + Warming up the forward pass -[2023-08-29 14:56:31,424][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 14:56:36,474][inference][INFO] - + Forward pass latency: 3.80e-03 (s) -[2023-08-29 14:56:36,475][inference][INFO] - + Forward pass throughput: 263.00 (samples/s) -[2023-08-29 14:56:36,475][inference][INFO] - Saving inference results -[2023-08-29 14:56:36,486][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_13:48:51_33aa0af70c70d9a8205b0ff0d1d4e68807fbb173/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-29_13:48:51_33aa0af70c70d9a8205b0ff0d1d4e68807fbb173/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:48:51_33aa0af70c70d9a8205b0ff0d1d4e68807fbb173/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_13:48:51_33aa0af70c70d9a8205b0ff0d1d4e68807fbb173/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-29_13:48:51_33aa0af70c70d9a8205b0ff0d1d4e68807fbb173/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 92259aa57fe648b63f618f278700ea3d20b061af..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:48:51_33aa0af70c70d9a8205b0ff0d1d4e68807fbb173/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_13:48:51_33aa0af70c70d9a8205b0ff0d1d4e68807fbb173/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-29_13:48:51_33aa0af70c70d9a8205b0ff0d1d4e68807fbb173/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-08-29_13:48:51_33aa0af70c70d9a8205b0ff0d1d4e68807fbb173/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:48:51_33aa0af70c70d9a8205b0ff0d1d4e68807fbb173/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-29_13:48:51_33aa0af70c70d9a8205b0ff0d1d4e68807fbb173/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-29_13:48:51_33aa0af70c70d9a8205b0ff0d1d4e68807fbb173/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:48:51_33aa0af70c70d9a8205b0ff0d1d4e68807fbb173/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_13:48:51_33aa0af70c70d9a8205b0ff0d1d4e68807fbb173/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-29_13:48:51_33aa0af70c70d9a8205b0ff0d1d4e68807fbb173/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 2e3bae28af00fd05c306aa62f5a896b133105d66..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:48:51_33aa0af70c70d9a8205b0ff0d1d4e68807fbb173/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.64032,0.00426,939.0 diff --git a/raw_results/2023-08-29_13:48:51_33aa0af70c70d9a8205b0ff0d1d4e68807fbb173/pytorch_bert_inference/1/main.log b/raw_results/2023-08-29_13:48:51_33aa0af70c70d9a8205b0ff0d1d4e68807fbb173/pytorch_bert_inference/1/main.log deleted file mode 100644 index 71cac301369c88f86994f2cfda94af3ded628709..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-29_13:48:51_33aa0af70c70d9a8205b0ff0d1d4e68807fbb173/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-29 14:56:36,857][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 14:56:36,858][benchmark][INFO] - + Setting seed(42) -[2023-08-29 14:56:37,316][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-29 14:56:37,317][backend][INFO] - Configuring pytorch backend -[2023-08-29 14:56:37,317][backend][INFO] - + Checking initial device isolation -[2023-08-29 14:56:37,317][backend][INFO] - + Checking contineous device isolation -[2023-08-29 14:56:37,317][pytorch][INFO] - + Disabling gradients -[2023-08-29 14:56:37,317][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 14:56:37,546][pytorch][INFO] - + Turning on eval mode -[2023-08-29 14:56:37,547][inference][INFO] - Running inference benchmark -[2023-08-29 14:56:37,673][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 14:56:37,674][inference][INFO] - + Tracking forward pass peak memory -[2023-08-29 14:56:37,715][inference][INFO] - + Forward pass peak memory: 467.64032 (MB) -[2023-08-29 14:56:37,716][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 14:56:37,718][inference][INFO] - + Warming up the forward pass -[2023-08-29 14:56:37,762][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 14:56:42,804][inference][INFO] - + Forward pass latency: 4.26e-03 (s) -[2023-08-29 14:56:42,805][inference][INFO] - + Forward pass throughput: 939.00 (samples/s) -[2023-08-29 14:56:42,806][inference][INFO] - Saving inference results -[2023-08-29 14:56:42,813][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_13:48:51_33aa0af70c70d9a8205b0ff0d1d4e68807fbb173/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-29_13:48:51_33aa0af70c70d9a8205b0ff0d1d4e68807fbb173/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:48:51_33aa0af70c70d9a8205b0ff0d1d4e68807fbb173/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: 
hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_13:48:51_33aa0af70c70d9a8205b0ff0d1d4e68807fbb173/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-29_13:48:51_33aa0af70c70d9a8205b0ff0d1d4e68807fbb173/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 5b6cc14b48455ea272521252941933f8634bd966..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:48:51_33aa0af70c70d9a8205b0ff0d1d4e68807fbb173/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_13:48:51_33aa0af70c70d9a8205b0ff0d1d4e68807fbb173/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-29_13:48:51_33aa0af70c70d9a8205b0ff0d1d4e68807fbb173/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-29_13:48:51_33aa0af70c70d9a8205b0ff0d1d4e68807fbb173/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:48:51_33aa0af70c70d9a8205b0ff0d1d4e68807fbb173/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-29_13:48:51_33aa0af70c70d9a8205b0ff0d1d4e68807fbb173/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-29_13:48:51_33aa0af70c70d9a8205b0ff0d1d4e68807fbb173/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:48:51_33aa0af70c70d9a8205b0ff0d1d4e68807fbb173/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_13:48:51_33aa0af70c70d9a8205b0ff0d1d4e68807fbb173/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-29_13:48:51_33aa0af70c70d9a8205b0ff0d1d4e68807fbb173/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 87b97ca8ce2db3bd2bb4bbf53b19ac226de02e8c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:48:51_33aa0af70c70d9a8205b0ff0d1d4e68807fbb173/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.34015999999997,0.00383,261.0,0.55,182.0 diff --git a/raw_results/2023-08-29_13:48:51_33aa0af70c70d9a8205b0ff0d1d4e68807fbb173/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-29_13:48:51_33aa0af70c70d9a8205b0ff0d1d4e68807fbb173/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index bbd295e64be3b6fccb776b3ae812c8d1649716ed..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_13:48:51_33aa0af70c70d9a8205b0ff0d1d4e68807fbb173/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-29 14:56:47,562][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 14:56:47,563][benchmark][INFO] - + Setting seed(42) -[2023-08-29 14:56:49,322][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-29 14:56:49,322][backend][INFO] - Configuring pytorch backend -[2023-08-29 14:56:49,323][backend][INFO] - + Checking initial device isolation -[2023-08-29 14:56:49,323][backend][INFO] - + Checking contineous device isolation -[2023-08-29 14:56:49,323][pytorch][INFO] - + Disabling gradients -[2023-08-29 14:56:49,323][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 14:56:49,985][pytorch][INFO] - + Turning on eval mode -[2023-08-29 14:56:49,986][inference][INFO] - Running inference benchmark -[2023-08-29 14:56:50,186][inference][INFO] - + Tracking forward pass peak memory -[2023-08-29 14:56:50,232][inference][INFO] - + Forward pass peak memory: 469.34015999999997 (MB) -[2023-08-29 14:56:50,234][inference][INFO] - + Warming up the 
forward pass -[2023-08-29 14:56:50,269][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 14:56:55,313][inference][INFO] - + Forward pass latency: 3.83e-03 (s) -[2023-08-29 14:56:55,314][inference][INFO] - + Forward pass throughput: 261.00 (samples/s) -[2023-08-29 14:56:55,315][inference][INFO] - + Warming up the generation pass -[2023-08-29 14:56:55,908][inference][INFO] - + Tracking generation latency and throughput -[2023-08-29 14:57:01,408][inference][INFO] - + Generation pass latency: 5.50e-01 (s) -[2023-08-29 14:57:01,409][inference][INFO] - + Generation pass throughput: 182.00 (tokens/s) -[2023-08-29 14:57:01,409][inference][INFO] - Saving inference results -[2023-08-29 14:57:01,420][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_14:06:41_483861d52db59cf99219a0281695d1e7e8859218/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-29_14:06:41_483861d52db59cf99219a0281695d1e7e8859218/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_14:06:41_483861d52db59cf99219a0281695d1e7e8859218/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_14:06:41_483861d52db59cf99219a0281695d1e7e8859218/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-29_14:06:41_483861d52db59cf99219a0281695d1e7e8859218/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index b7f01470a84e5a7b20390ca881d57f3cae8216ca..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_14:06:41_483861d52db59cf99219a0281695d1e7e8859218/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_14:06:41_483861d52db59cf99219a0281695d1e7e8859218/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-29_14:06:41_483861d52db59cf99219a0281695d1e7e8859218/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-29_14:06:41_483861d52db59cf99219a0281695d1e7e8859218/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_14:06:41_483861d52db59cf99219a0281695d1e7e8859218/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-29_14:06:41_483861d52db59cf99219a0281695d1e7e8859218/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-29_14:06:41_483861d52db59cf99219a0281695d1e7e8859218/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_14:06:41_483861d52db59cf99219a0281695d1e7e8859218/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_14:06:41_483861d52db59cf99219a0281695d1e7e8859218/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-29_14:06:41_483861d52db59cf99219a0281695d1e7e8859218/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 6a9a3d6ed2573ae8c4d3f8febe62de6498e4d5be..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_14:06:41_483861d52db59cf99219a0281695d1e7e8859218/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.989056,0.0031,323.0 diff --git a/raw_results/2023-08-29_14:06:41_483861d52db59cf99219a0281695d1e7e8859218/pytorch_bert_inference/0/main.log b/raw_results/2023-08-29_14:06:41_483861d52db59cf99219a0281695d1e7e8859218/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
93d15eb7b39593155fe100e9a846da3e06da4ab4..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_14:06:41_483861d52db59cf99219a0281695d1e7e8859218/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-29 14:58:09,262][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 14:58:09,263][benchmark][INFO] - + Setting seed(42) -[2023-08-29 14:58:10,499][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-29 14:58:10,499][backend][INFO] - Configuring pytorch backend -[2023-08-29 14:58:10,500][backend][INFO] - + Checking initial device isolation -[2023-08-29 14:58:10,500][backend][INFO] - + Checking contineous device isolation -[2023-08-29 14:58:10,500][pytorch][INFO] - + Disabling gradients -[2023-08-29 14:58:10,500][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 14:58:11,129][pytorch][INFO] - + Turning on eval mode -[2023-08-29 14:58:11,130][inference][INFO] - Running inference benchmark -[2023-08-29 14:58:11,293][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 14:58:11,295][inference][INFO] - + Tracking forward pass peak memory -[2023-08-29 14:58:11,356][inference][INFO] - + Forward pass peak memory: 466.989056 (MB) -[2023-08-29 14:58:11,358][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 14:58:11,359][inference][INFO] - + Warming up the forward pass -[2023-08-29 14:58:11,396][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 14:58:16,450][inference][INFO] - + Forward pass latency: 3.10e-03 (s) -[2023-08-29 14:58:16,451][inference][INFO] - + Forward pass throughput: 323.00 (samples/s) -[2023-08-29 14:58:16,451][inference][INFO] - Saving inference results -[2023-08-29 14:58:16,464][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_14:06:41_483861d52db59cf99219a0281695d1e7e8859218/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-29_14:06:41_483861d52db59cf99219a0281695d1e7e8859218/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_14:06:41_483861d52db59cf99219a0281695d1e7e8859218/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_14:06:41_483861d52db59cf99219a0281695d1e7e8859218/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-29_14:06:41_483861d52db59cf99219a0281695d1e7e8859218/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 73dd7995a60c598459deffb5c5e2d42e9dadc283..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_14:06:41_483861d52db59cf99219a0281695d1e7e8859218/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
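The sweeper block above is what produces the numbered job directories (0/ and 1/) that the surrounding paths refer to: Hydra's BasicSweeper splits the comma-separated override benchmark.input_shapes.batch_size: 1,4 and launches one job per value. A rough sketch of that expansion in Python (illustrative only, not Hydra's actual implementation):

    from itertools import product

    def expand_sweep(params):
        # Split each comma-separated sweep value and take the cartesian
        # product, yielding one override set per job, numbered in order.
        keys = list(params)
        values = [str(params[k]).split(",") for k in keys]
        return [dict(zip(keys, combo)) for combo in product(*values)]

    for num, overrides in enumerate(expand_sweep({"benchmark.input_shapes.batch_size": "1,4"})):
        print(num, overrides)
    # 0 {'benchmark.input_shapes.batch_size': '1'}   -> job directory 0/
    # 1 {'benchmark.input_shapes.batch_size': '4'}   -> job directory 1/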
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_14:06:41_483861d52db59cf99219a0281695d1e7e8859218/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-29_14:06:41_483861d52db59cf99219a0281695d1e7e8859218/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-29_14:06:41_483861d52db59cf99219a0281695d1e7e8859218/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_14:06:41_483861d52db59cf99219a0281695d1e7e8859218/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-29_14:06:41_483861d52db59cf99219a0281695d1e7e8859218/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-29_14:06:41_483861d52db59cf99219a0281695d1e7e8859218/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_14:06:41_483861d52db59cf99219a0281695d1e7e8859218/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_14:06:41_483861d52db59cf99219a0281695d1e7e8859218/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-29_14:06:41_483861d52db59cf99219a0281695d1e7e8859218/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 7889ddee596449a4db6cf637921b2f58e0c337b7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_14:06:41_483861d52db59cf99219a0281695d1e7e8859218/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.037632,0.00341,1170.0 diff --git a/raw_results/2023-08-29_14:06:41_483861d52db59cf99219a0281695d1e7e8859218/pytorch_bert_inference/1/main.log b/raw_results/2023-08-29_14:06:41_483861d52db59cf99219a0281695d1e7e8859218/pytorch_bert_inference/1/main.log deleted file mode 100644 index fb9879c6cb3b4a58a60e010afa995caae6e21da2..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_14:06:41_483861d52db59cf99219a0281695d1e7e8859218/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-29 14:58:17,189][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 14:58:17,190][benchmark][INFO] - + Setting seed(42) -[2023-08-29 14:58:17,713][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-29 14:58:17,714][backend][INFO] - Configuring pytorch backend -[2023-08-29 14:58:17,714][backend][INFO] - + Checking initial device isolation -[2023-08-29 14:58:17,714][backend][INFO] - + Checking contineous device isolation -[2023-08-29 14:58:17,714][pytorch][INFO] - + Disabling gradients -[2023-08-29 14:58:17,714][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 14:58:17,830][pytorch][INFO] - + Turning on eval mode -[2023-08-29 14:58:17,831][inference][INFO] - Running inference benchmark -[2023-08-29 14:58:17,952][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 14:58:17,953][inference][INFO] - + Tracking forward pass 
peak memory -[2023-08-29 14:58:17,997][inference][INFO] - + Forward pass peak memory: 468.037632 (MB) -[2023-08-29 14:58:17,998][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 14:58:17,999][inference][INFO] - + Warming up the forward pass -[2023-08-29 14:58:18,035][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 14:58:23,081][inference][INFO] - + Forward pass latency: 3.41e-03 (s) -[2023-08-29 14:58:23,082][inference][INFO] - + Forward pass throughput: 1170.00 (samples/s) -[2023-08-29 14:58:23,082][inference][INFO] - Saving inference results -[2023-08-29 14:58:23,089][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_14:06:41_483861d52db59cf99219a0281695d1e7e8859218/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-29_14:06:41_483861d52db59cf99219a0281695d1e7e8859218/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_14:06:41_483861d52db59cf99219a0281695d1e7e8859218/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_14:06:41_483861d52db59cf99219a0281695d1e7e8859218/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-29_14:06:41_483861d52db59cf99219a0281695d1e7e8859218/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index d9899dbb978aff0ff2ab9cf5549d9dcd6eb59edb..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_14:06:41_483861d52db59cf99219a0281695d1e7e8859218/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_14:06:41_483861d52db59cf99219a0281695d1e7e8859218/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-29_14:06:41_483861d52db59cf99219a0281695d1e7e8859218/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-29_14:06:41_483861d52db59cf99219a0281695d1e7e8859218/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_14:06:41_483861d52db59cf99219a0281695d1e7e8859218/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-29_14:06:41_483861d52db59cf99219a0281695d1e7e8859218/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-29_14:06:41_483861d52db59cf99219a0281695d1e7e8859218/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_14:06:41_483861d52db59cf99219a0281695d1e7e8859218/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_14:06:41_483861d52db59cf99219a0281695d1e7e8859218/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-29_14:06:41_483861d52db59cf99219a0281695d1e7e8859218/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index e6e099cae804870c6d2a708f8a0bef1a6ab960ce..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_14:06:41_483861d52db59cf99219a0281695d1e7e8859218/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.27462399999996,0.0038,263.0,0.519,193.0 diff --git a/raw_results/2023-08-29_14:06:41_483861d52db59cf99219a0281695d1e7e8859218/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-29_14:06:41_483861d52db59cf99219a0281695d1e7e8859218/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 6d87f8a8da9a4ca2a2fcc4d01a60ae7d572678b1..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-29_14:06:41_483861d52db59cf99219a0281695d1e7e8859218/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-29 14:58:28,014][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 14:58:28,015][benchmark][INFO] - + Setting seed(42) -[2023-08-29 14:58:29,529][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-29 14:58:29,530][backend][INFO] - Configuring pytorch backend -[2023-08-29 14:58:29,530][backend][INFO] - + Checking initial device isolation -[2023-08-29 14:58:29,530][backend][INFO] - + Checking contineous device isolation -[2023-08-29 14:58:29,530][pytorch][INFO] - + Disabling gradients -[2023-08-29 14:58:29,531][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 14:58:30,193][pytorch][INFO] - + Turning on eval mode -[2023-08-29 14:58:30,193][inference][INFO] - Running inference benchmark -[2023-08-29 14:58:30,394][inference][INFO] - + Tracking forward pass peak memory -[2023-08-29 14:58:30,444][inference][INFO] - + Forward pass peak memory: 469.27462399999996 (MB) -[2023-08-29 14:58:30,446][inference][INFO] - + Warming up the forward pass -[2023-08-29 14:58:30,485][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 14:58:35,528][inference][INFO] - + Forward pass latency: 3.80e-03 (s) -[2023-08-29 14:58:35,530][inference][INFO] - + Forward pass throughput: 263.00 (samples/s) -[2023-08-29 14:58:35,530][inference][INFO] - + Warming up the generation pass -[2023-08-29 14:58:36,113][inference][INFO] - + Tracking generation latency and throughput -[2023-08-29 14:58:41,302][inference][INFO] - + Generation pass latency: 5.19e-01 (s) -[2023-08-29 14:58:41,303][inference][INFO] - + Generation pass throughput: 193.00 (tokens/s) -[2023-08-29 14:58:41,303][inference][INFO] - Saving inference results -[2023-08-29 14:58:41,317][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_15:15:05_a35f889acc91cb40bd8c6659691aeb27581a69b1/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-29_15:15:05_a35f889acc91cb40bd8c6659691aeb27581a69b1/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_15:15:05_a35f889acc91cb40bd8c6659691aeb27581a69b1/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_15:15:05_a35f889acc91cb40bd8c6659691aeb27581a69b1/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-29_15:15:05_a35f889acc91cb40bd8c6659691aeb27581a69b1/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index b5a3ba8b0865fe6fede1a4ec8c044e0f2716ff4a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_15:15:05_a35f889acc91cb40bd8c6659691aeb27581a69b1/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
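Note the difference between this config.yaml and the hydra_config.yaml dumps elsewhere in the diff: here disable_grad and eval_mode are still the unresolved interpolation ${is_inference:${benchmark.name}}, while the resolved dumps show true. That implies a custom OmegaConf resolver registered by the benchmark harness. A minimal sketch of how such a resolver could behave (the real one lives in optimum_benchmark and may differ in detail):

    from omegaconf import OmegaConf

    # Hypothetical re-creation of the resolver the configs rely on.
    OmegaConf.register_new_resolver("is_inference", lambda name: name == "inference")

    cfg = OmegaConf.create({
        "benchmark": {"name": "inference"},
        "backend": {"disable_grad": "${is_inference:${benchmark.name}}"},
    })
    print(cfg.backend.disable_grad)  # True, i.e. 'true' once resolved and dumped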
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_15:15:05_a35f889acc91cb40bd8c6659691aeb27581a69b1/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-29_15:15:05_a35f889acc91cb40bd8c6659691aeb27581a69b1/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-29_15:15:05_a35f889acc91cb40bd8c6659691aeb27581a69b1/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_15:15:05_a35f889acc91cb40bd8c6659691aeb27581a69b1/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-29_15:15:05_a35f889acc91cb40bd8c6659691aeb27581a69b1/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-29_15:15:05_a35f889acc91cb40bd8c6659691aeb27581a69b1/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_15:15:05_a35f889acc91cb40bd8c6659691aeb27581a69b1/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_15:15:05_a35f889acc91cb40bd8c6659691aeb27581a69b1/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-29_15:15:05_a35f889acc91cb40bd8c6659691aeb27581a69b1/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index dea51256198dff0d010ee981ec6f04639648497d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_15:15:05_a35f889acc91cb40bd8c6659691aeb27581a69b1/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.72691199999997,0.00356,281.0 diff --git a/raw_results/2023-08-29_15:15:05_a35f889acc91cb40bd8c6659691aeb27581a69b1/pytorch_bert_inference/0/main.log b/raw_results/2023-08-29_15:15:05_a35f889acc91cb40bd8c6659691aeb27581a69b1/pytorch_bert_inference/0/main.log deleted file mode 100644 index b7e338d16b93265eaca9e36080191af2496cc6a1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_15:15:05_a35f889acc91cb40bd8c6659691aeb27581a69b1/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-29 16:50:12,339][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 16:50:12,339][benchmark][INFO] - + Setting seed(42) -[2023-08-29 16:50:13,880][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-29 16:50:13,880][backend][INFO] - Configuring pytorch backend -[2023-08-29 16:50:13,880][backend][INFO] - + Checking initial device isolation -[2023-08-29 16:50:13,880][backend][INFO] - + Checking contineous device isolation -[2023-08-29 16:50:13,880][pytorch][INFO] - + Disabling gradients -[2023-08-29 16:50:13,881][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 16:50:14,468][pytorch][INFO] - + Turning on eval mode -[2023-08-29 16:50:14,469][inference][INFO] - Running inference benchmark -[2023-08-29 16:50:14,590][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 16:50:14,591][inference][INFO] - + Tracking forward 
pass peak memory -[2023-08-29 16:50:14,657][inference][INFO] - + Forward pass peak memory: 466.72691199999997 (MB) -[2023-08-29 16:50:14,658][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 16:50:14,660][inference][INFO] - + Warming up the forward pass -[2023-08-29 16:50:14,692][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 16:50:19,743][inference][INFO] - + Forward pass latency: 3.56e-03 (s) -[2023-08-29 16:50:19,744][inference][INFO] - + Forward pass throughput: 281.00 (samples/s) -[2023-08-29 16:50:19,744][inference][INFO] - Saving inference results -[2023-08-29 16:50:19,754][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_15:15:05_a35f889acc91cb40bd8c6659691aeb27581a69b1/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-29_15:15:05_a35f889acc91cb40bd8c6659691aeb27581a69b1/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_15:15:05_a35f889acc91cb40bd8c6659691aeb27581a69b1/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_15:15:05_a35f889acc91cb40bd8c6659691aeb27581a69b1/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-29_15:15:05_a35f889acc91cb40bd8c6659691aeb27581a69b1/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index ba85060506492dccf250a89ca6e400a8793ffac4..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_15:15:05_a35f889acc91cb40bd8c6659691aeb27581a69b1/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} 
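The main.log above also shows the measurement protocol encoded by warmup_runs: 10 and benchmark_duration: 5 in the config: the forward pass is warmed up first, then timed repeatedly for about five seconds (note the ~5 s gap between the "Tracking forward pass latency" and "Forward pass latency" log lines). A minimal sketch of that kind of loop, assuming a generic fn() callable rather than the benchmark's real internals:

    import time

    def measure_latency(fn, warmup_runs=10, benchmark_duration=5.0):
        # Warm up so one-time costs (allocations, caching) are excluded.
        for _ in range(warmup_runs):
            fn()
        latencies = []
        start = time.perf_counter()
        while time.perf_counter() - start < benchmark_duration:
            t0 = time.perf_counter()
            fn()
            latencies.append(time.perf_counter() - t0)
        return sum(latencies) / len(latencies)

    # e.g. latency = measure_latency(lambda: model(**dummy_inputs))
    # forward throughput (samples/s) would then be batch_size / latency.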
- launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_15:15:05_a35f889acc91cb40bd8c6659691aeb27581a69b1/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-29_15:15:05_a35f889acc91cb40bd8c6659691aeb27581a69b1/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-29_15:15:05_a35f889acc91cb40bd8c6659691aeb27581a69b1/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_15:15:05_a35f889acc91cb40bd8c6659691aeb27581a69b1/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-29_15:15:05_a35f889acc91cb40bd8c6659691aeb27581a69b1/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-29_15:15:05_a35f889acc91cb40bd8c6659691aeb27581a69b1/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_15:15:05_a35f889acc91cb40bd8c6659691aeb27581a69b1/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_15:15:05_a35f889acc91cb40bd8c6659691aeb27581a69b1/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-29_15:15:05_a35f889acc91cb40bd8c6659691aeb27581a69b1/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 5d871090fa0b96db2b47ef078b8d8f9bc8e1ec61..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_15:15:05_a35f889acc91cb40bd8c6659691aeb27581a69b1/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.62393599999996,0.00402,995.0 diff --git a/raw_results/2023-08-29_15:15:05_a35f889acc91cb40bd8c6659691aeb27581a69b1/pytorch_bert_inference/1/main.log b/raw_results/2023-08-29_15:15:05_a35f889acc91cb40bd8c6659691aeb27581a69b1/pytorch_bert_inference/1/main.log deleted file mode 100644 index 
feae43dbd4886b9790683d2e6261e8c43d96116d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_15:15:05_a35f889acc91cb40bd8c6659691aeb27581a69b1/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-29 16:50:20,128][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 16:50:20,130][benchmark][INFO] - + Setting seed(42) -[2023-08-29 16:50:20,710][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-29 16:50:20,710][backend][INFO] - Configuring pytorch backend -[2023-08-29 16:50:20,710][backend][INFO] - + Checking initial device isolation -[2023-08-29 16:50:20,710][backend][INFO] - + Checking contineous device isolation -[2023-08-29 16:50:20,710][pytorch][INFO] - + Disabling gradients -[2023-08-29 16:50:20,711][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 16:50:20,847][pytorch][INFO] - + Turning on eval mode -[2023-08-29 16:50:20,847][inference][INFO] - Running inference benchmark -[2023-08-29 16:50:20,972][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 16:50:20,973][inference][INFO] - + Tracking forward pass peak memory -[2023-08-29 16:50:21,013][inference][INFO] - + Forward pass peak memory: 467.62393599999996 (MB) -[2023-08-29 16:50:21,014][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 16:50:21,016][inference][INFO] - + Warming up the forward pass -[2023-08-29 16:50:21,057][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 16:50:26,100][inference][INFO] - + Forward pass latency: 4.02e-03 (s) -[2023-08-29 16:50:26,101][inference][INFO] - + Forward pass throughput: 995.00 (samples/s) -[2023-08-29 16:50:26,101][inference][INFO] - Saving inference results -[2023-08-29 16:50:26,109][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_15:15:05_a35f889acc91cb40bd8c6659691aeb27581a69b1/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-29_15:15:05_a35f889acc91cb40bd8c6659691aeb27581a69b1/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_15:15:05_a35f889acc91cb40bd8c6659691aeb27581a69b1/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_15:15:05_a35f889acc91cb40bd8c6659691aeb27581a69b1/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-29_15:15:05_a35f889acc91cb40bd8c6659691aeb27581a69b1/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 2148d1be46072657c188d449d685d8ee6b66f443..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_15:15:05_a35f889acc91cb40bd8c6659691aeb27581a69b1/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
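
The `dir` interpolations in the `hydra.run` and `hydra.sweep` blocks of these hydra.yaml files are resolved by OmegaConf at launch time: `${oc.env:COMMIT_DATE_GMT}` and `${oc.env:COMMIT_SHA}` read environment variables exported by the benchmark job, and `${experiment_name}` comes from the primary config. A minimal sketch of that resolution, using the built-in `oc.env` resolver and values taken from the directory names recorded in this diff:

```python
import os
from omegaconf import OmegaConf

# Values as they appear in the raw_results/ and sweeps/ paths in this diff.
os.environ["COMMIT_DATE_GMT"] = "2023-08-29_15:15:05"
os.environ["COMMIT_SHA"] = "a35f889acc91cb40bd8c6659691aeb27581a69b1"

cfg = OmegaConf.create({
    "experiment_name": "pytorch_gpt2_inference",
    "sweep_dir": "sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}",
})

# Interpolations resolve on access, reproducing the sweep directory layout.
print(cfg.sweep_dir)
# sweeps/2023-08-29_15:15:05_a35f889acc91cb40bd8c6659691aeb27581a69b1/pytorch_gpt2_inference
```

This is why every artifact in the dump lives under a `<commit-date>_<commit-sha>/<experiment_name>/<job-num>` prefix.
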
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_15:15:05_a35f889acc91cb40bd8c6659691aeb27581a69b1/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-29_15:15:05_a35f889acc91cb40bd8c6659691aeb27581a69b1/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-29_15:15:05_a35f889acc91cb40bd8c6659691aeb27581a69b1/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_15:15:05_a35f889acc91cb40bd8c6659691aeb27581a69b1/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-29_15:15:05_a35f889acc91cb40bd8c6659691aeb27581a69b1/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-29_15:15:05_a35f889acc91cb40bd8c6659691aeb27581a69b1/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_15:15:05_a35f889acc91cb40bd8c6659691aeb27581a69b1/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_15:15:05_a35f889acc91cb40bd8c6659691aeb27581a69b1/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-29_15:15:05_a35f889acc91cb40bd8c6659691aeb27581a69b1/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index f620e62bb1fd70e54732c745054943f47f8a1728..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_15:15:05_a35f889acc91cb40bd8c6659691aeb27581a69b1/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.44255999999996,0.00309,324.0,0.488,205.0 diff --git a/raw_results/2023-08-29_15:15:05_a35f889acc91cb40bd8c6659691aeb27581a69b1/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-29_15:15:05_a35f889acc91cb40bd8c6659691aeb27581a69b1/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index fbc008e272229c9de97a6f31809665957f381832..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_15:15:05_a35f889acc91cb40bd8c6659691aeb27581a69b1/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-29 16:50:30,985][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 16:50:30,987][benchmark][INFO] - + Setting seed(42) -[2023-08-29 16:50:32,531][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-29 16:50:32,531][backend][INFO] - Configuring pytorch backend -[2023-08-29 16:50:32,531][backend][INFO] - + Checking initial device isolation -[2023-08-29 16:50:32,531][backend][INFO] - + Checking contineous device isolation -[2023-08-29 16:50:32,532][pytorch][INFO] - + Disabling gradients -[2023-08-29 16:50:32,532][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 16:50:33,184][pytorch][INFO] - + Turning on eval mode -[2023-08-29 16:50:33,185][inference][INFO] - Running inference benchmark -[2023-08-29 16:50:33,382][inference][INFO] - + Tracking forward pass peak memory -[2023-08-29 16:50:33,427][inference][INFO] - + Forward pass peak memory: 469.44255999999996 (MB) -[2023-08-29 16:50:33,428][inference][INFO] - + Warming up the 
forward pass -[2023-08-29 16:50:33,464][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 16:50:38,515][inference][INFO] - + Forward pass latency: 3.09e-03 (s) -[2023-08-29 16:50:38,516][inference][INFO] - + Forward pass throughput: 324.00 (samples/s) -[2023-08-29 16:50:38,517][inference][INFO] - + Warming up the generation pass -[2023-08-29 16:50:39,023][inference][INFO] - + Tracking generation latency and throughput -[2023-08-29 16:50:44,398][inference][INFO] - + Generation pass latency: 4.88e-01 (s) -[2023-08-29 16:50:44,399][inference][INFO] - + Generation pass throughput: 205.00 (tokens/s) -[2023-08-29 16:50:44,399][inference][INFO] - Saving inference results -[2023-08-29 16:50:44,410][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_15:38:44_d97fd871e5ba57b23b1775ef2939ffea128dd08d/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-29_15:38:44_d97fd871e5ba57b23b1775ef2939ffea128dd08d/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_15:38:44_d97fd871e5ba57b23b1775ef2939ffea128dd08d/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_15:38:44_d97fd871e5ba57b23b1775ef2939ffea128dd08d/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-29_15:38:44_d97fd871e5ba57b23b1775ef2939ffea128dd08d/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 012e1dd3ec0cacc37e77fd40e0ff1022b749aa42..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_15:38:44_d97fd871e5ba57b23b1775ef2939ffea128dd08d/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_15:38:44_d97fd871e5ba57b23b1775ef2939ffea128dd08d/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-29_15:38:44_d97fd871e5ba57b23b1775ef2939ffea128dd08d/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-29_15:38:44_d97fd871e5ba57b23b1775ef2939ffea128dd08d/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_15:38:44_d97fd871e5ba57b23b1775ef2939ffea128dd08d/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-29_15:38:44_d97fd871e5ba57b23b1775ef2939ffea128dd08d/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-29_15:38:44_d97fd871e5ba57b23b1775ef2939ffea128dd08d/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_15:38:44_d97fd871e5ba57b23b1775ef2939ffea128dd08d/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_15:38:44_d97fd871e5ba57b23b1775ef2939ffea128dd08d/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-29_15:38:44_d97fd871e5ba57b23b1775ef2939ffea128dd08d/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 0d2e5b8ef430c7b6ca21c6f0a7c17ab9d5a34098..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_15:38:44_d97fd871e5ba57b23b1775ef2939ffea128dd08d/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.05459199999996,0.00399,251.0 diff --git a/raw_results/2023-08-29_15:38:44_d97fd871e5ba57b23b1775ef2939ffea128dd08d/pytorch_bert_inference/0/main.log b/raw_results/2023-08-29_15:38:44_d97fd871e5ba57b23b1775ef2939ffea128dd08d/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
ffbc1ddd5bf9ee2bce0f6661994f49ef17d27a78..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_15:38:44_d97fd871e5ba57b23b1775ef2939ffea128dd08d/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-29 16:52:34,383][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 16:52:34,384][benchmark][INFO] - + Setting seed(42) -[2023-08-29 16:52:35,792][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-29 16:52:35,792][backend][INFO] - Configuring pytorch backend -[2023-08-29 16:52:35,792][backend][INFO] - + Checking initial device isolation -[2023-08-29 16:52:35,792][backend][INFO] - + Checking contineous device isolation -[2023-08-29 16:52:35,793][pytorch][INFO] - + Disabling gradients -[2023-08-29 16:52:35,793][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 16:52:36,417][pytorch][INFO] - + Turning on eval mode -[2023-08-29 16:52:36,417][inference][INFO] - Running inference benchmark -[2023-08-29 16:52:36,529][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 16:52:36,531][inference][INFO] - + Tracking forward pass peak memory -[2023-08-29 16:52:36,593][inference][INFO] - + Forward pass peak memory: 467.05459199999996 (MB) -[2023-08-29 16:52:36,595][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 16:52:36,597][inference][INFO] - + Warming up the forward pass -[2023-08-29 16:52:36,638][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 16:52:41,683][inference][INFO] - + Forward pass latency: 3.99e-03 (s) -[2023-08-29 16:52:41,684][inference][INFO] - + Forward pass throughput: 251.00 (samples/s) -[2023-08-29 16:52:41,684][inference][INFO] - Saving inference results -[2023-08-29 16:52:41,694][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_15:38:44_d97fd871e5ba57b23b1775ef2939ffea128dd08d/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-29_15:38:44_d97fd871e5ba57b23b1775ef2939ffea128dd08d/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_15:38:44_d97fd871e5ba57b23b1775ef2939ffea128dd08d/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_15:38:44_d97fd871e5ba57b23b1775ef2939ffea128dd08d/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-29_15:38:44_d97fd871e5ba57b23b1775ef2939ffea128dd08d/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 0333baed338e00951ad1dc6c5484bdec757f1888..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_15:38:44_d97fd871e5ba57b23b1775ef2939ffea128dd08d/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
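
The `sweeper.params` entry above (`benchmark.input_shapes.batch_size: 1,4`) is what produces the numbered job directories in this dump: in MULTIRUN mode Hydra's `BasicSweeper` expands each comma-separated value list into one job per combination, and `hydra.job.num` becomes the `0/` and `1/` subdirectory names. An illustrative sketch of that expansion (a cross-product over value lists, not Hydra's actual implementation):

```python
from itertools import product

# Sweep params as recorded in the hydra.yaml above.
params = {"benchmark.input_shapes.batch_size": "1,4"}

# Each comma-separated list becomes an axis; each combination is one job.
value_lists = {key: raw.split(",") for key, raw in params.items()}
jobs = [dict(zip(value_lists, combo)) for combo in product(*value_lists.values())]

for num, overrides in enumerate(jobs):
    print(num, [f"{key}={value}" for key, value in overrides.items()])
# 0 ['benchmark.input_shapes.batch_size=1']
# 1 ['benchmark.input_shapes.batch_size=4']
```

Job 0's overrides.yaml accordingly records `benchmark.input_shapes.batch_size=1` and job 1's records `=4`, matching the `task` and `override_dirname` fields in the corresponding hydra.yaml files.
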
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_15:38:44_d97fd871e5ba57b23b1775ef2939ffea128dd08d/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-29_15:38:44_d97fd871e5ba57b23b1775ef2939ffea128dd08d/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-29_15:38:44_d97fd871e5ba57b23b1775ef2939ffea128dd08d/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_15:38:44_d97fd871e5ba57b23b1775ef2939ffea128dd08d/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-29_15:38:44_d97fd871e5ba57b23b1775ef2939ffea128dd08d/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-29_15:38:44_d97fd871e5ba57b23b1775ef2939ffea128dd08d/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_15:38:44_d97fd871e5ba57b23b1775ef2939ffea128dd08d/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_15:38:44_d97fd871e5ba57b23b1775ef2939ffea128dd08d/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-29_15:38:44_d97fd871e5ba57b23b1775ef2939ffea128dd08d/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 837b411ea49867f4035e066935e6e23e56d04476..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_15:38:44_d97fd871e5ba57b23b1775ef2939ffea128dd08d/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.013056,0.0034,1180.0 diff --git a/raw_results/2023-08-29_15:38:44_d97fd871e5ba57b23b1775ef2939ffea128dd08d/pytorch_bert_inference/1/main.log b/raw_results/2023-08-29_15:38:44_d97fd871e5ba57b23b1775ef2939ffea128dd08d/pytorch_bert_inference/1/main.log deleted file mode 100644 index 8a53f72360b6e452ae45ae12308dc823de11a041..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_15:38:44_d97fd871e5ba57b23b1775ef2939ffea128dd08d/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-29 16:52:42,070][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 16:52:42,071][benchmark][INFO] - + Setting seed(42) -[2023-08-29 16:52:42,520][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-29 16:52:42,520][backend][INFO] - Configuring pytorch backend -[2023-08-29 16:52:42,520][backend][INFO] - + Checking initial device isolation -[2023-08-29 16:52:42,520][backend][INFO] - + Checking contineous device isolation -[2023-08-29 16:52:42,520][pytorch][INFO] - + Disabling gradients -[2023-08-29 16:52:42,520][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 16:52:42,640][pytorch][INFO] - + Turning on eval mode -[2023-08-29 16:52:42,640][inference][INFO] - Running inference benchmark -[2023-08-29 16:52:42,770][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 16:52:42,771][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-29 16:52:42,809][inference][INFO] - + Forward pass peak memory: 468.013056 (MB) -[2023-08-29 16:52:42,810][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 16:52:42,812][inference][INFO] - + Warming up the forward pass -[2023-08-29 16:52:42,846][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 16:52:47,892][inference][INFO] - + Forward pass latency: 3.40e-03 (s) -[2023-08-29 16:52:47,893][inference][INFO] - + Forward pass throughput: 1180.00 (samples/s) -[2023-08-29 16:52:47,893][inference][INFO] - Saving inference results -[2023-08-29 16:52:47,901][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_15:38:44_d97fd871e5ba57b23b1775ef2939ffea128dd08d/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-29_15:38:44_d97fd871e5ba57b23b1775ef2939ffea128dd08d/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_15:38:44_d97fd871e5ba57b23b1775ef2939ffea128dd08d/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_15:38:44_d97fd871e5ba57b23b1775ef2939ffea128dd08d/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-29_15:38:44_d97fd871e5ba57b23b1775ef2939ffea128dd08d/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index fa1039435a7dded1685cb20681014664cccffc4a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_15:38:44_d97fd871e5ba57b23b1775ef2939ffea128dd08d/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_15:38:44_d97fd871e5ba57b23b1775ef2939ffea128dd08d/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-29_15:38:44_d97fd871e5ba57b23b1775ef2939ffea128dd08d/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-29_15:38:44_d97fd871e5ba57b23b1775ef2939ffea128dd08d/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_15:38:44_d97fd871e5ba57b23b1775ef2939ffea128dd08d/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-29_15:38:44_d97fd871e5ba57b23b1775ef2939ffea128dd08d/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-29_15:38:44_d97fd871e5ba57b23b1775ef2939ffea128dd08d/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_15:38:44_d97fd871e5ba57b23b1775ef2939ffea128dd08d/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_15:38:44_d97fd871e5ba57b23b1775ef2939ffea128dd08d/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-29_15:38:44_d97fd871e5ba57b23b1775ef2939ffea128dd08d/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 730322ae038ed972265e61f5e3c8690bf4d577f9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_15:38:44_d97fd871e5ba57b23b1775ef2939ffea128dd08d/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.123072,0.00378,265.0,0.508,197.0 diff --git a/raw_results/2023-08-29_15:38:44_d97fd871e5ba57b23b1775ef2939ffea128dd08d/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-29_15:38:44_d97fd871e5ba57b23b1775ef2939ffea128dd08d/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index b1f761499b12882f13ba60f5431ab5e08725f6b4..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-29_15:38:44_d97fd871e5ba57b23b1775ef2939ffea128dd08d/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-29 16:52:52,636][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 16:52:52,637][benchmark][INFO] - + Setting seed(42) -[2023-08-29 16:52:54,047][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-29 16:52:54,047][backend][INFO] - Configuring pytorch backend -[2023-08-29 16:52:54,048][backend][INFO] - + Checking initial device isolation -[2023-08-29 16:52:54,048][backend][INFO] - + Checking contineous device isolation -[2023-08-29 16:52:54,048][pytorch][INFO] - + Disabling gradients -[2023-08-29 16:52:54,048][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 16:52:54,693][pytorch][INFO] - + Turning on eval mode -[2023-08-29 16:52:54,694][inference][INFO] - Running inference benchmark -[2023-08-29 16:52:54,960][inference][INFO] - + Tracking forward pass peak memory -[2023-08-29 16:52:55,005][inference][INFO] - + Forward pass peak memory: 469.123072 (MB) -[2023-08-29 16:52:55,007][inference][INFO] - + Warming up the forward pass -[2023-08-29 16:52:55,042][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 16:53:00,088][inference][INFO] - + Forward pass latency: 3.78e-03 (s) -[2023-08-29 16:53:00,090][inference][INFO] - + Forward pass throughput: 265.00 (samples/s) -[2023-08-29 16:53:00,090][inference][INFO] - + Warming up the generation pass -[2023-08-29 16:53:00,602][inference][INFO] - + Tracking generation latency and throughput -[2023-08-29 16:53:05,685][inference][INFO] - + Generation pass latency: 5.08e-01 (s) -[2023-08-29 16:53:05,686][inference][INFO] - + Generation pass throughput: 197.00 (tokens/s) -[2023-08-29 16:53:05,686][inference][INFO] - Saving inference results -[2023-08-29 16:53:05,697][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_15:47:24_aade754b27f4496cd67c2d1bfb67ef0df0ffa5d1/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-29_15:47:24_aade754b27f4496cd67c2d1bfb67ef0df0ffa5d1/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_15:47:24_aade754b27f4496cd67c2d1bfb67ef0df0ffa5d1/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_15:47:24_aade754b27f4496cd67c2d1bfb67ef0df0ffa5d1/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-29_15:47:24_aade754b27f4496cd67c2d1bfb67ef0df0ffa5d1/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 2f4dd20782f69df3b51289b375cb11edc55e3b51..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_15:47:24_aade754b27f4496cd67c2d1bfb67ef0df0ffa5d1/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
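
Each job writes a single-row `inference_results.csv` whose first, unnamed column is a pandas-style integer index, so the files round-trip cleanly with `index_col=0`. A small sketch of reading one back, assuming the file for this bert run (batch_size=1) is still on disk:

```python
import pandas as pd

# One-row results file written by the job; the unnamed first column is the index.
df = pd.read_csv(
    "raw_results/2023-08-29_15:47:24_aade754b27f4496cd67c2d1bfb67ef0df0ffa5d1/"
    "pytorch_bert_inference/0/inference_results.csv",
    index_col=0,
)

print(df["forward.peak_memory(MB)"].iloc[0])        # 466.747392
print(df["forward.latency(s)"].iloc[0])             # 0.0037
print(df["forward.throughput(samples/s)"].iloc[0])  # 270.0
```

Text-generation jobs add `generate.latency(s)` and `generate.throughput(tokens/s)` columns to the same schema, as in the gpt2 CSVs elsewhere in this diff.
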
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_15:47:24_aade754b27f4496cd67c2d1bfb67ef0df0ffa5d1/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-29_15:47:24_aade754b27f4496cd67c2d1bfb67ef0df0ffa5d1/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-29_15:47:24_aade754b27f4496cd67c2d1bfb67ef0df0ffa5d1/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_15:47:24_aade754b27f4496cd67c2d1bfb67ef0df0ffa5d1/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-29_15:47:24_aade754b27f4496cd67c2d1bfb67ef0df0ffa5d1/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-29_15:47:24_aade754b27f4496cd67c2d1bfb67ef0df0ffa5d1/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_15:47:24_aade754b27f4496cd67c2d1bfb67ef0df0ffa5d1/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_15:47:24_aade754b27f4496cd67c2d1bfb67ef0df0ffa5d1/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-29_15:47:24_aade754b27f4496cd67c2d1bfb67ef0df0ffa5d1/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 0df4f52362befd438c441559f5e829ff944d02aa..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_15:47:24_aade754b27f4496cd67c2d1bfb67ef0df0ffa5d1/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.747392,0.0037,270.0 diff --git a/raw_results/2023-08-29_15:47:24_aade754b27f4496cd67c2d1bfb67ef0df0ffa5d1/pytorch_bert_inference/0/main.log b/raw_results/2023-08-29_15:47:24_aade754b27f4496cd67c2d1bfb67ef0df0ffa5d1/pytorch_bert_inference/0/main.log deleted file mode 100644 index 7a3b832a682a235de7a74ccdd02ad2a850e80436..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_15:47:24_aade754b27f4496cd67c2d1bfb67ef0df0ffa5d1/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-29 16:54:12,750][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 16:54:12,751][benchmark][INFO] - + Setting seed(42) -[2023-08-29 16:54:13,995][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-29 16:54:13,996][backend][INFO] - Configuring pytorch backend -[2023-08-29 16:54:13,996][backend][INFO] - + Checking initial device isolation -[2023-08-29 16:54:13,996][backend][INFO] - + Checking contineous device isolation -[2023-08-29 16:54:13,996][pytorch][INFO] - + Disabling gradients -[2023-08-29 16:54:13,996][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 16:54:14,624][pytorch][INFO] - + Turning on eval mode -[2023-08-29 16:54:14,625][inference][INFO] - Running inference benchmark -[2023-08-29 16:54:14,746][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 16:54:14,748][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-29 16:54:14,808][inference][INFO] - + Forward pass peak memory: 466.747392 (MB) -[2023-08-29 16:54:14,809][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 16:54:14,811][inference][INFO] - + Warming up the forward pass -[2023-08-29 16:54:14,849][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 16:54:19,894][inference][INFO] - + Forward pass latency: 3.70e-03 (s) -[2023-08-29 16:54:19,895][inference][INFO] - + Forward pass throughput: 270.00 (samples/s) -[2023-08-29 16:54:19,896][inference][INFO] - Saving inference results -[2023-08-29 16:54:19,905][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_15:47:24_aade754b27f4496cd67c2d1bfb67ef0df0ffa5d1/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-29_15:47:24_aade754b27f4496cd67c2d1bfb67ef0df0ffa5d1/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_15:47:24_aade754b27f4496cd67c2d1bfb67ef0df0ffa5d1/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_15:47:24_aade754b27f4496cd67c2d1bfb67ef0df0ffa5d1/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-29_15:47:24_aade754b27f4496cd67c2d1bfb67ef0df0ffa5d1/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index af864e2540e11994a6e444d33428e4b95536e5de..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_15:47:24_aade754b27f4496cd67c2d1bfb67ef0df0ffa5d1/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_15:47:24_aade754b27f4496cd67c2d1bfb67ef0df0ffa5d1/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-29_15:47:24_aade754b27f4496cd67c2d1bfb67ef0df0ffa5d1/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-08-29_15:47:24_aade754b27f4496cd67c2d1bfb67ef0df0ffa5d1/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_15:47:24_aade754b27f4496cd67c2d1bfb67ef0df0ffa5d1/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-29_15:47:24_aade754b27f4496cd67c2d1bfb67ef0df0ffa5d1/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-29_15:47:24_aade754b27f4496cd67c2d1bfb67ef0df0ffa5d1/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_15:47:24_aade754b27f4496cd67c2d1bfb67ef0df0ffa5d1/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_15:47:24_aade754b27f4496cd67c2d1bfb67ef0df0ffa5d1/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-29_15:47:24_aade754b27f4496cd67c2d1bfb67ef0df0ffa5d1/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 01a0537db7be22e990967c61974832b089441077..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_15:47:24_aade754b27f4496cd67c2d1bfb67ef0df0ffa5d1/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.63212799999997,0.00358,1120.0 diff --git a/raw_results/2023-08-29_15:47:24_aade754b27f4496cd67c2d1bfb67ef0df0ffa5d1/pytorch_bert_inference/1/main.log b/raw_results/2023-08-29_15:47:24_aade754b27f4496cd67c2d1bfb67ef0df0ffa5d1/pytorch_bert_inference/1/main.log deleted file mode 100644 index 49eff9bcb4f255aafa8e03e8f21ea749be6609ec..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-29_15:47:24_aade754b27f4496cd67c2d1bfb67ef0df0ffa5d1/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-29 16:54:20,280][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 16:54:20,281][benchmark][INFO] - + Setting seed(42) -[2023-08-29 16:54:20,734][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-29 16:54:20,734][backend][INFO] - Configuring pytorch backend -[2023-08-29 16:54:20,734][backend][INFO] - + Checking initial device isolation -[2023-08-29 16:54:20,734][backend][INFO] - + Checking contineous device isolation -[2023-08-29 16:54:20,734][pytorch][INFO] - + Disabling gradients -[2023-08-29 16:54:20,735][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 16:54:20,852][pytorch][INFO] - + Turning on eval mode -[2023-08-29 16:54:20,852][inference][INFO] - Running inference benchmark -[2023-08-29 16:54:20,977][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 16:54:20,979][inference][INFO] - + Tracking forward pass peak memory -[2023-08-29 16:54:21,022][inference][INFO] - + Forward pass peak memory: 467.63212799999997 (MB) -[2023-08-29 16:54:21,023][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 16:54:21,025][inference][INFO] - + Warming up the forward pass -[2023-08-29 16:54:21,061][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 16:54:26,104][inference][INFO] - + Forward pass latency: 3.58e-03 (s) -[2023-08-29 16:54:26,106][inference][INFO] - + Forward pass throughput: 1120.00 (samples/s) -[2023-08-29 16:54:26,106][inference][INFO] - Saving inference results -[2023-08-29 16:54:26,114][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_15:47:24_aade754b27f4496cd67c2d1bfb67ef0df0ffa5d1/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-29_15:47:24_aade754b27f4496cd67c2d1bfb67ef0df0ffa5d1/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_15:47:24_aade754b27f4496cd67c2d1bfb67ef0df0ffa5d1/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference 
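Note on the numbers in these inference_results.csv files: throughput is derived, not independently measured. Forward throughput is batch_size divided by the mean forward latency, generation throughput is new_tokens divided by the generation latency (at batch_size 1), and all reported values appear to be rounded to three significant figures. A minimal sketch that reproduces the values above; the helper names are hypothetical, not optimum-benchmark API:

    import math

    def round_sig(x: float, sig: int = 3) -> float:
        # Match the three-significant-figure rounding seen in the CSVs (assumption).
        return round(x, sig - int(math.floor(math.log10(abs(x)))) - 1)

    def forward_throughput(batch_size: int, latency_s: float) -> float:
        # samples/s = samples per forward pass / seconds per forward pass
        return round_sig(batch_size / latency_s)

    assert forward_throughput(1, 0.0037) == 270.0    # pytorch_bert_inference/0
    assert forward_throughput(4, 0.00358) == 1120.0  # pytorch_bert_inference/1
    assert round_sig(100 / 0.502) == 199.0           # gpt2 generate, new_tokens=100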
-model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_15:47:24_aade754b27f4496cd67c2d1bfb67ef0df0ffa5d1/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-29_15:47:24_aade754b27f4496cd67c2d1bfb67ef0df0ffa5d1/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 8154254e37ccd0658f24ea482a0b8e9729cfc688..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_15:47:24_aade754b27f4496cd67c2d1bfb67ef0df0ffa5d1/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
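Each run also snapshots the exact Hydra composition that produced it: a config_name (bert_cpu_inference or gpt2_cpu_inference), a MULTIRUN sweep over benchmark.input_shapes.batch_size=1,4 for the bert experiments, and no overrides for gpt2. The same config can be rebuilt offline with Hydra's compose API; a sketch, assuming the configs directory recorded under config_sources is checked out next to the script:

    from hydra import compose, initialize
    from omegaconf import OmegaConf

    # config_path is an assumption: the `configs` dir listed in config_sources.
    with initialize(version_base="1.3", config_path="configs"):
        cfg = compose(
            config_name="bert_cpu_inference",
            overrides=["benchmark.input_shapes.batch_size=4"],  # sweep job 1
        )

    # resolve=False leaves ${oc.env:...} and ${is_inference:...} interpolations
    # untouched; resolving them needs the CI env vars and a custom resolver.
    print(OmegaConf.to_yaml(cfg, resolve=False))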
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_15:47:24_aade754b27f4496cd67c2d1bfb67ef0df0ffa5d1/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-29_15:47:24_aade754b27f4496cd67c2d1bfb67ef0df0ffa5d1/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-29_15:47:24_aade754b27f4496cd67c2d1bfb67ef0df0ffa5d1/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_15:47:24_aade754b27f4496cd67c2d1bfb67ef0df0ffa5d1/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-29_15:47:24_aade754b27f4496cd67c2d1bfb67ef0df0ffa5d1/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-29_15:47:24_aade754b27f4496cd67c2d1bfb67ef0df0ffa5d1/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_15:47:24_aade754b27f4496cd67c2d1bfb67ef0df0ffa5d1/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_15:47:24_aade754b27f4496cd67c2d1bfb67ef0df0ffa5d1/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-29_15:47:24_aade754b27f4496cd67c2d1bfb67ef0df0ffa5d1/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 6785aa741d458db3c64e6f2bfca30824fe83f266..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_15:47:24_aade754b27f4496cd67c2d1bfb67ef0df0ffa5d1/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.43846399999995,0.00396,253.0,0.502,199.0 diff --git a/raw_results/2023-08-29_15:47:24_aade754b27f4496cd67c2d1bfb67ef0df0ffa5d1/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-29_15:47:24_aade754b27f4496cd67c2d1bfb67ef0df0ffa5d1/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 8dd2f23355542e41e97802d48ba2d43085af1ff9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_15:47:24_aade754b27f4496cd67c2d1bfb67ef0df0ffa5d1/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-29 16:54:31,330][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 16:54:31,331][benchmark][INFO] - + Setting seed(42) -[2023-08-29 16:54:32,788][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-29 16:54:32,788][backend][INFO] - Configuring pytorch backend -[2023-08-29 16:54:32,788][backend][INFO] - + Checking initial device isolation -[2023-08-29 16:54:32,788][backend][INFO] - + Checking contineous device isolation -[2023-08-29 16:54:32,788][pytorch][INFO] - + Disabling gradients -[2023-08-29 16:54:32,789][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 16:54:33,508][pytorch][INFO] - + Turning on eval mode -[2023-08-29 16:54:33,508][inference][INFO] - Running inference benchmark -[2023-08-29 16:54:33,718][inference][INFO] - + Tracking forward pass peak memory -[2023-08-29 16:54:33,762][inference][INFO] - + Forward pass peak memory: 469.43846399999995 (MB) -[2023-08-29 16:54:33,764][inference][INFO] - + Warming up the 
forward pass -[2023-08-29 16:54:33,801][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 16:54:38,844][inference][INFO] - + Forward pass latency: 3.96e-03 (s) -[2023-08-29 16:54:38,846][inference][INFO] - + Forward pass throughput: 253.00 (samples/s) -[2023-08-29 16:54:38,847][inference][INFO] - + Warming up the generation pass -[2023-08-29 16:54:39,441][inference][INFO] - + Tracking generation latency and throughput -[2023-08-29 16:54:44,461][inference][INFO] - + Generation pass latency: 5.02e-01 (s) -[2023-08-29 16:54:44,462][inference][INFO] - + Generation pass throughput: 199.00 (tokens/s) -[2023-08-29 16:54:44,462][inference][INFO] - Saving inference results -[2023-08-29 16:54:44,472][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_16:01:43_245dcc49ef9862a7165aec7be9c4a3299b8d06a1/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-29_16:01:43_245dcc49ef9862a7165aec7be9c4a3299b8d06a1/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_16:01:43_245dcc49ef9862a7165aec7be9c4a3299b8d06a1/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_16:01:43_245dcc49ef9862a7165aec7be9c4a3299b8d06a1/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-29_16:01:43_245dcc49ef9862a7165aec7be9c4a3299b8d06a1/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 07b7762ce6f25db76ab163cf100529b1945d51c5..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_16:01:43_245dcc49ef9862a7165aec7be9c4a3299b8d06a1/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_16:01:43_245dcc49ef9862a7165aec7be9c4a3299b8d06a1/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-29_16:01:43_245dcc49ef9862a7165aec7be9c4a3299b8d06a1/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-29_16:01:43_245dcc49ef9862a7165aec7be9c4a3299b8d06a1/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_16:01:43_245dcc49ef9862a7165aec7be9c4a3299b8d06a1/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-29_16:01:43_245dcc49ef9862a7165aec7be9c4a3299b8d06a1/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-29_16:01:43_245dcc49ef9862a7165aec7be9c4a3299b8d06a1/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_16:01:43_245dcc49ef9862a7165aec7be9c4a3299b8d06a1/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_16:01:43_245dcc49ef9862a7165aec7be9c4a3299b8d06a1/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-29_16:01:43_245dcc49ef9862a7165aec7be9c4a3299b8d06a1/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index e80defd9ac023af488b737d539f84b0f9dcb9cb5..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_16:01:43_245dcc49ef9862a7165aec7be9c4a3299b8d06a1/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.853888,0.00317,315.0 diff --git a/raw_results/2023-08-29_16:01:43_245dcc49ef9862a7165aec7be9c4a3299b8d06a1/pytorch_bert_inference/0/main.log b/raw_results/2023-08-29_16:01:43_245dcc49ef9862a7165aec7be9c4a3299b8d06a1/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
bd1a2bb1941b7e1568296f40c7894bbae72aea1b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_16:01:43_245dcc49ef9862a7165aec7be9c4a3299b8d06a1/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-29 16:55:53,731][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 16:55:53,731][benchmark][INFO] - + Setting seed(42) -[2023-08-29 16:55:54,980][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-29 16:55:54,980][backend][INFO] - Configuring pytorch backend -[2023-08-29 16:55:54,981][backend][INFO] - + Checking initial device isolation -[2023-08-29 16:55:54,981][backend][INFO] - + Checking contineous device isolation -[2023-08-29 16:55:54,981][pytorch][INFO] - + Disabling gradients -[2023-08-29 16:55:54,981][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 16:55:55,596][pytorch][INFO] - + Turning on eval mode -[2023-08-29 16:55:55,596][inference][INFO] - Running inference benchmark -[2023-08-29 16:55:55,719][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 16:55:55,720][inference][INFO] - + Tracking forward pass peak memory -[2023-08-29 16:55:55,789][inference][INFO] - + Forward pass peak memory: 466.853888 (MB) -[2023-08-29 16:55:55,790][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 16:55:55,792][inference][INFO] - + Warming up the forward pass -[2023-08-29 16:55:55,826][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 16:56:00,874][inference][INFO] - + Forward pass latency: 3.17e-03 (s) -[2023-08-29 16:56:00,875][inference][INFO] - + Forward pass throughput: 315.00 (samples/s) -[2023-08-29 16:56:00,875][inference][INFO] - Saving inference results -[2023-08-29 16:56:00,886][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_16:01:43_245dcc49ef9862a7165aec7be9c4a3299b8d06a1/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-29_16:01:43_245dcc49ef9862a7165aec7be9c4a3299b8d06a1/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_16:01:43_245dcc49ef9862a7165aec7be9c4a3299b8d06a1/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_16:01:43_245dcc49ef9862a7165aec7be9c4a3299b8d06a1/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-29_16:01:43_245dcc49ef9862a7165aec7be9c4a3299b8d06a1/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index fcf26eafcc7595f8614c1535f16e37e991b34c3d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_16:01:43_245dcc49ef9862a7165aec7be9c4a3299b8d06a1/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
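The hydra_logging and job_logging sections that follow are standard logging dictConfig payloads: a colorlog console handler for both, plus a plain-format FileHandler writing ${hydra.job.name}.log, which is where the main.log files quoted throughout this diff come from. A standalone equivalent of the job logger, assuming colorlog is installed:

    import logging.config

    logging.config.dictConfig({
        "version": 1,
        "formatters": {
            "simple": {"format": "[%(asctime)s][%(name)s][%(levelname)s] - %(message)s"},
            "colorlog": {
                "()": "colorlog.ColoredFormatter",
                "format": "[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s]"
                          "[%(log_color)s%(levelname)s%(reset)s] - %(message)s",
            },
        },
        "handlers": {
            "console": {"class": "logging.StreamHandler", "formatter": "colorlog",
                        "stream": "ext://sys.stdout"},
            "file": {"class": "logging.FileHandler", "formatter": "simple",
                     "filename": "main.log"},
        },
        "root": {"level": "INFO", "handlers": ["console", "file"]},
        "disable_existing_loggers": False,
    })

    logging.getLogger("inference").info("Running inference benchmark")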
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_16:01:43_245dcc49ef9862a7165aec7be9c4a3299b8d06a1/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-29_16:01:43_245dcc49ef9862a7165aec7be9c4a3299b8d06a1/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-29_16:01:43_245dcc49ef9862a7165aec7be9c4a3299b8d06a1/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_16:01:43_245dcc49ef9862a7165aec7be9c4a3299b8d06a1/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-29_16:01:43_245dcc49ef9862a7165aec7be9c4a3299b8d06a1/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-29_16:01:43_245dcc49ef9862a7165aec7be9c4a3299b8d06a1/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_16:01:43_245dcc49ef9862a7165aec7be9c4a3299b8d06a1/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_16:01:43_245dcc49ef9862a7165aec7be9c4a3299b8d06a1/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-29_16:01:43_245dcc49ef9862a7165aec7be9c4a3299b8d06a1/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index af4d9b975eff02f9c978725f76a11f3d182209f9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_16:01:43_245dcc49ef9862a7165aec7be9c4a3299b8d06a1/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.80006399999996,0.00353,1130.0 diff --git a/raw_results/2023-08-29_16:01:43_245dcc49ef9862a7165aec7be9c4a3299b8d06a1/pytorch_bert_inference/1/main.log b/raw_results/2023-08-29_16:01:43_245dcc49ef9862a7165aec7be9c4a3299b8d06a1/pytorch_bert_inference/1/main.log deleted file mode 100644 index ef64a7c30155603c7ac8f59a87dc076df558434f..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_16:01:43_245dcc49ef9862a7165aec7be9c4a3299b8d06a1/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-29 16:56:01,251][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 16:56:01,252][benchmark][INFO] - + Setting seed(42) -[2023-08-29 16:56:01,693][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-29 16:56:01,693][backend][INFO] - Configuring pytorch backend -[2023-08-29 16:56:01,694][backend][INFO] - + Checking initial device isolation -[2023-08-29 16:56:01,694][backend][INFO] - + Checking contineous device isolation -[2023-08-29 16:56:01,694][pytorch][INFO] - + Disabling gradients -[2023-08-29 16:56:01,694][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 16:56:01,808][pytorch][INFO] - + Turning on eval mode -[2023-08-29 16:56:01,809][inference][INFO] - Running inference benchmark -[2023-08-29 16:56:01,934][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 16:56:01,935][inference][INFO] - + Tracking forward 
pass peak memory -[2023-08-29 16:56:01,978][inference][INFO] - + Forward pass peak memory: 467.80006399999996 (MB) -[2023-08-29 16:56:01,979][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 16:56:01,981][inference][INFO] - + Warming up the forward pass -[2023-08-29 16:56:02,018][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 16:56:07,063][inference][INFO] - + Forward pass latency: 3.53e-03 (s) -[2023-08-29 16:56:07,064][inference][INFO] - + Forward pass throughput: 1130.00 (samples/s) -[2023-08-29 16:56:07,064][inference][INFO] - Saving inference results -[2023-08-29 16:56:07,072][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_16:01:43_245dcc49ef9862a7165aec7be9c4a3299b8d06a1/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-29_16:01:43_245dcc49ef9862a7165aec7be9c4a3299b8d06a1/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_16:01:43_245dcc49ef9862a7165aec7be9c4a3299b8d06a1/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_16:01:43_245dcc49ef9862a7165aec7be9c4a3299b8d06a1/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-29_16:01:43_245dcc49ef9862a7165aec7be9c4a3299b8d06a1/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index eb9ce7ceb635eb8de3cf3c99c1e97fb5bde469b6..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_16:01:43_245dcc49ef9862a7165aec7be9c4a3299b8d06a1/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - 
launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_16:01:43_245dcc49ef9862a7165aec7be9c4a3299b8d06a1/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-29_16:01:43_245dcc49ef9862a7165aec7be9c4a3299b8d06a1/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-29_16:01:43_245dcc49ef9862a7165aec7be9c4a3299b8d06a1/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_16:01:43_245dcc49ef9862a7165aec7be9c4a3299b8d06a1/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-29_16:01:43_245dcc49ef9862a7165aec7be9c4a3299b8d06a1/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-29_16:01:43_245dcc49ef9862a7165aec7be9c4a3299b8d06a1/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_16:01:43_245dcc49ef9862a7165aec7be9c4a3299b8d06a1/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_16:01:43_245dcc49ef9862a7165aec7be9c4a3299b8d06a1/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-29_16:01:43_245dcc49ef9862a7165aec7be9c4a3299b8d06a1/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index c4cb5e14f544eabe7ff2d29430fac8f64e16233c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_16:01:43_245dcc49ef9862a7165aec7be9c4a3299b8d06a1/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.639168,0.00345,290.0,0.483,207.0 diff --git a/raw_results/2023-08-29_16:01:43_245dcc49ef9862a7165aec7be9c4a3299b8d06a1/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-29_16:01:43_245dcc49ef9862a7165aec7be9c4a3299b8d06a1/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 6ca52b7b605e133da14827416f5cb6ae75dc354f..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-29_16:01:43_245dcc49ef9862a7165aec7be9c4a3299b8d06a1/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-29 16:56:12,040][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 16:56:12,041][benchmark][INFO] - + Setting seed(42) -[2023-08-29 16:56:13,760][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-29 16:56:13,761][backend][INFO] - Configuring pytorch backend -[2023-08-29 16:56:13,761][backend][INFO] - + Checking initial device isolation -[2023-08-29 16:56:13,761][backend][INFO] - + Checking contineous device isolation -[2023-08-29 16:56:13,761][pytorch][INFO] - + Disabling gradients -[2023-08-29 16:56:13,762][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 16:56:14,410][pytorch][INFO] - + Turning on eval mode -[2023-08-29 16:56:14,410][inference][INFO] - Running inference benchmark -[2023-08-29 16:56:14,609][inference][INFO] - + Tracking forward pass peak memory -[2023-08-29 16:56:14,660][inference][INFO] - + Forward pass peak memory: 469.639168 (MB) -[2023-08-29 16:56:14,661][inference][INFO] - + Warming up the forward pass -[2023-08-29 16:56:14,694][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 16:56:19,740][inference][INFO] - + Forward pass latency: 3.45e-03 (s) -[2023-08-29 16:56:19,742][inference][INFO] - + Forward pass throughput: 290.00 (samples/s) -[2023-08-29 16:56:19,743][inference][INFO] - + Warming up the generation pass -[2023-08-29 16:56:20,235][inference][INFO] - + Tracking generation latency and throughput -[2023-08-29 16:56:25,553][inference][INFO] - + Generation pass latency: 4.83e-01 (s) -[2023-08-29 16:56:25,554][inference][INFO] - + Generation pass throughput: 207.00 (tokens/s) -[2023-08-29 16:56:25,554][inference][INFO] - Saving inference results -[2023-08-29 16:56:25,566][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_17:00:40_0e59c93983b84610db9a4d88be1531ba8d745ff9/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-29_17:00:40_0e59c93983b84610db9a4d88be1531ba8d745ff9/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_17:00:40_0e59c93983b84610db9a4d88be1531ba8d745ff9/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_17:00:40_0e59c93983b84610db9a4d88be1531ba8d745ff9/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-29_17:00:40_0e59c93983b84610db9a4d88be1531ba8d745ff9/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 03317f678acca9a12da04412ec93b52911728c46..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_17:00:40_0e59c93983b84610db9a4d88be1531ba8d745ff9/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
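
An aside on the `${is_inference:${benchmark.name}}` interpolations in the `.config/config.yaml` files above: these are OmegaConf resolver calls, evaluated when the config is composed, which is why the corresponding `hydra_config.yaml` files record the already-resolved values `disable_grad: true` and `eval_mode: true`. A minimal sketch of how such a resolver could behave (the resolver name comes from the configs; its exact upstream implementation is an assumption):

```python
from omegaconf import OmegaConf

# Hypothetical stand-in for the "is_inference" resolver referenced by
# ${is_inference:${benchmark.name}}: true only for the inference benchmark.
OmegaConf.register_new_resolver("is_inference", lambda name: name == "inference")

cfg = OmegaConf.create(
    """
    benchmark:
      name: inference
    backend:
      disable_grad: ${is_inference:${benchmark.name}}
      eval_mode: ${is_inference:${benchmark.name}}
    """
)

OmegaConf.resolve(cfg)           # materialize interpolations in place
print(cfg.backend.disable_grad)  # True, matching hydra_config.yaml above
```
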
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_17:00:40_0e59c93983b84610db9a4d88be1531ba8d745ff9/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-29_17:00:40_0e59c93983b84610db9a4d88be1531ba8d745ff9/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-29_17:00:40_0e59c93983b84610db9a4d88be1531ba8d745ff9/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_17:00:40_0e59c93983b84610db9a4d88be1531ba8d745ff9/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-29_17:00:40_0e59c93983b84610db9a4d88be1531ba8d745ff9/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-29_17:00:40_0e59c93983b84610db9a4d88be1531ba8d745ff9/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_17:00:40_0e59c93983b84610db9a4d88be1531ba8d745ff9/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_17:00:40_0e59c93983b84610db9a4d88be1531ba8d745ff9/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-29_17:00:40_0e59c93983b84610db9a4d88be1531ba8d745ff9/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index d9999854ef3a5bda9c7689e688a695f373b775f8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_17:00:40_0e59c93983b84610db9a4d88be1531ba8d745ff9/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.00953599999997,0.00357,280.0 diff --git a/raw_results/2023-08-29_17:00:40_0e59c93983b84610db9a4d88be1531ba8d745ff9/pytorch_bert_inference/0/main.log b/raw_results/2023-08-29_17:00:40_0e59c93983b84610db9a4d88be1531ba8d745ff9/pytorch_bert_inference/0/main.log deleted file mode 100644 index 8badb4d6fb4f57bea5d2b6e061da027df9cab481..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_17:00:40_0e59c93983b84610db9a4d88be1531ba8d745ff9/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-29 18:50:01,383][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 18:50:01,385][benchmark][INFO] - + Setting seed(42) -[2023-08-29 18:50:02,768][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-29 18:50:02,769][backend][INFO] - Configuring pytorch backend -[2023-08-29 18:50:02,769][backend][INFO] - + Checking initial device isolation -[2023-08-29 18:50:02,769][backend][INFO] - + Checking contineous device isolation -[2023-08-29 18:50:02,769][pytorch][INFO] - + Disabling gradients -[2023-08-29 18:50:02,769][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 18:50:03,408][pytorch][INFO] - + Turning on eval mode -[2023-08-29 18:50:03,409][inference][INFO] - Running inference benchmark -[2023-08-29 18:50:03,533][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 18:50:03,534][inference][INFO] - + Tracking forward 
pass peak memory -[2023-08-29 18:50:03,594][inference][INFO] - + Forward pass peak memory: 467.00953599999997 (MB) -[2023-08-29 18:50:03,595][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 18:50:03,597][inference][INFO] - + Warming up the forward pass -[2023-08-29 18:50:03,632][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 18:50:08,679][inference][INFO] - + Forward pass latency: 3.57e-03 (s) -[2023-08-29 18:50:08,681][inference][INFO] - + Forward pass throughput: 280.00 (samples/s) -[2023-08-29 18:50:08,681][inference][INFO] - Saving inference results -[2023-08-29 18:50:08,692][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_17:00:40_0e59c93983b84610db9a4d88be1531ba8d745ff9/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-29_17:00:40_0e59c93983b84610db9a4d88be1531ba8d745ff9/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_17:00:40_0e59c93983b84610db9a4d88be1531ba8d745ff9/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_17:00:40_0e59c93983b84610db9a4d88be1531ba8d745ff9/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-29_17:00:40_0e59c93983b84610db9a4d88be1531ba8d745ff9/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index e57c5eb82a78f784a4c791401a8267b70507e6b1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_17:00:40_0e59c93983b84610db9a4d88be1531ba8d745ff9/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} 
- launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_17:00:40_0e59c93983b84610db9a4d88be1531ba8d745ff9/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-29_17:00:40_0e59c93983b84610db9a4d88be1531ba8d745ff9/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-29_17:00:40_0e59c93983b84610db9a4d88be1531ba8d745ff9/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_17:00:40_0e59c93983b84610db9a4d88be1531ba8d745ff9/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-29_17:00:40_0e59c93983b84610db9a4d88be1531ba8d745ff9/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-29_17:00:40_0e59c93983b84610db9a4d88be1531ba8d745ff9/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_17:00:40_0e59c93983b84610db9a4d88be1531ba8d745ff9/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_17:00:40_0e59c93983b84610db9a4d88be1531ba8d745ff9/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-29_17:00:40_0e59c93983b84610db9a4d88be1531ba8d745ff9/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index d630b9aab76beaf8c9ac0874e8d6abfe0bd07833..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_17:00:40_0e59c93983b84610db9a4d88be1531ba8d745ff9/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.996672,0.00422,948.0 diff --git a/raw_results/2023-08-29_17:00:40_0e59c93983b84610db9a4d88be1531ba8d745ff9/pytorch_bert_inference/1/main.log b/raw_results/2023-08-29_17:00:40_0e59c93983b84610db9a4d88be1531ba8d745ff9/pytorch_bert_inference/1/main.log deleted file mode 100644 index 
b590f5c2b54bd65266db7f4a80576c2f67ab75b4..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_17:00:40_0e59c93983b84610db9a4d88be1531ba8d745ff9/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-29 18:50:09,078][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 18:50:09,079][benchmark][INFO] - + Setting seed(42) -[2023-08-29 18:50:09,570][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-29 18:50:09,570][backend][INFO] - Configuring pytorch backend -[2023-08-29 18:50:09,570][backend][INFO] - + Checking initial device isolation -[2023-08-29 18:50:09,571][backend][INFO] - + Checking contineous device isolation -[2023-08-29 18:50:09,571][pytorch][INFO] - + Disabling gradients -[2023-08-29 18:50:09,571][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 18:50:09,694][pytorch][INFO] - + Turning on eval mode -[2023-08-29 18:50:09,695][inference][INFO] - Running inference benchmark -[2023-08-29 18:50:09,824][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 18:50:09,825][inference][INFO] - + Tracking forward pass peak memory -[2023-08-29 18:50:09,870][inference][INFO] - + Forward pass peak memory: 467.996672 (MB) -[2023-08-29 18:50:09,871][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 18:50:09,872][inference][INFO] - + Warming up the forward pass -[2023-08-29 18:50:09,923][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 18:50:14,964][inference][INFO] - + Forward pass latency: 4.22e-03 (s) -[2023-08-29 18:50:14,965][inference][INFO] - + Forward pass throughput: 948.00 (samples/s) -[2023-08-29 18:50:14,965][inference][INFO] - Saving inference results -[2023-08-29 18:50:14,974][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_17:00:40_0e59c93983b84610db9a4d88be1531ba8d745ff9/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-29_17:00:40_0e59c93983b84610db9a4d88be1531ba8d745ff9/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_17:00:40_0e59c93983b84610db9a4d88be1531ba8d745ff9/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
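
A quick consistency check on the numbers in the log just above: forward throughput is batch size divided by mean latency, so job 1 (batch_size 4) at 4.22e-03 s gives 4 / 0.00422 ≈ 948 samples/s, matching the CSV. A minimal sketch of that check against the deleted results file (assuming pandas and the raw_results layout shown in the paths above):

```python
import pandas as pd

# Read one of the deleted result files (path taken from the diff above).
df = pd.read_csv(
    "raw_results/2023-08-29_17:00:40_0e59c93983b84610db9a4d88be1531ba8d745ff9/"
    "pytorch_bert_inference/1/inference_results.csv",
    index_col=0,
)

batch_size = 4  # from benchmark.input_shapes.batch_size in hydra_config.yaml
latency = df["forward.latency(s)"].iloc[0]                 # 0.00422
throughput = df["forward.throughput(samples/s)"].iloc[0]   # 948.0

# Throughput should be roughly batch_size / latency.
assert abs(throughput - batch_size / latency) / throughput < 0.05
```
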
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_17:00:40_0e59c93983b84610db9a4d88be1531ba8d745ff9/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-29_17:00:40_0e59c93983b84610db9a4d88be1531ba8d745ff9/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 47555873da07f52ac9b24fc5b7a41d422a31d4c7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_17:00:40_0e59c93983b84610db9a4d88be1531ba8d745ff9/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_17:00:40_0e59c93983b84610db9a4d88be1531ba8d745ff9/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-29_17:00:40_0e59c93983b84610db9a4d88be1531ba8d745ff9/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-29_17:00:40_0e59c93983b84610db9a4d88be1531ba8d745ff9/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_17:00:40_0e59c93983b84610db9a4d88be1531ba8d745ff9/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-29_17:00:40_0e59c93983b84610db9a4d88be1531ba8d745ff9/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-29_17:00:40_0e59c93983b84610db9a4d88be1531ba8d745ff9/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_17:00:40_0e59c93983b84610db9a4d88be1531ba8d745ff9/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_17:00:40_0e59c93983b84610db9a4d88be1531ba8d745ff9/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-29_17:00:40_0e59c93983b84610db9a4d88be1531ba8d745ff9/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 4dccbe1051896f800aeeb046662cd99d4fde7944..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_17:00:40_0e59c93983b84610db9a4d88be1531ba8d745ff9/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.45484799999997,0.00307,326.0,0.482,207.0 diff --git a/raw_results/2023-08-29_17:00:40_0e59c93983b84610db9a4d88be1531ba8d745ff9/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-29_17:00:40_0e59c93983b84610db9a4d88be1531ba8d745ff9/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index bb0a798ef63e50dd986fd966231daa99f91ec520..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_17:00:40_0e59c93983b84610db9a4d88be1531ba8d745ff9/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-29 18:50:19,821][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 18:50:19,822][benchmark][INFO] - + Setting seed(42) -[2023-08-29 18:50:21,246][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-29 18:50:21,246][backend][INFO] - Configuring pytorch backend -[2023-08-29 18:50:21,247][backend][INFO] - + Checking initial device isolation -[2023-08-29 18:50:21,247][backend][INFO] - + Checking contineous device isolation -[2023-08-29 18:50:21,247][pytorch][INFO] - + Disabling gradients -[2023-08-29 18:50:21,247][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 18:50:22,032][pytorch][INFO] - + Turning on eval mode -[2023-08-29 18:50:22,033][inference][INFO] - Running inference benchmark -[2023-08-29 18:50:22,234][inference][INFO] - + Tracking forward pass peak memory -[2023-08-29 18:50:22,283][inference][INFO] - + Forward pass peak memory: 469.45484799999997 (MB) -[2023-08-29 18:50:22,284][inference][INFO] - + Warming up the 
forward pass -[2023-08-29 18:50:22,317][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 18:50:27,368][inference][INFO] - + Forward pass latency: 3.07e-03 (s) -[2023-08-29 18:50:27,369][inference][INFO] - + Forward pass throughput: 326.00 (samples/s) -[2023-08-29 18:50:27,370][inference][INFO] - + Warming up the generation pass -[2023-08-29 18:50:27,860][inference][INFO] - + Tracking generation latency and throughput -[2023-08-29 18:50:33,167][inference][INFO] - + Generation pass latency: 4.82e-01 (s) -[2023-08-29 18:50:33,169][inference][INFO] - + Generation pass throughput: 207.00 (tokens/s) -[2023-08-29 18:50:33,169][inference][INFO] - Saving inference results -[2023-08-29 18:50:33,185][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_17:26:41_0daeeb40a10178ce219fffbf41791330524eedc1/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-29_17:26:41_0daeeb40a10178ce219fffbf41791330524eedc1/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_17:26:41_0daeeb40a10178ce219fffbf41791330524eedc1/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_17:26:41_0daeeb40a10178ce219fffbf41791330524eedc1/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-29_17:26:41_0daeeb40a10178ce219fffbf41791330524eedc1/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 0a1bc4f54045ad30912f266c66e7bb95a3ef3bbf..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_17:26:41_0daeeb40a10178ce219fffbf41791330524eedc1/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_17:26:41_0daeeb40a10178ce219fffbf41791330524eedc1/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-29_17:26:41_0daeeb40a10178ce219fffbf41791330524eedc1/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-29_17:26:41_0daeeb40a10178ce219fffbf41791330524eedc1/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_17:26:41_0daeeb40a10178ce219fffbf41791330524eedc1/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-29_17:26:41_0daeeb40a10178ce219fffbf41791330524eedc1/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-29_17:26:41_0daeeb40a10178ce219fffbf41791330524eedc1/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_17:26:41_0daeeb40a10178ce219fffbf41791330524eedc1/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_17:26:41_0daeeb40a10178ce219fffbf41791330524eedc1/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-29_17:26:41_0daeeb40a10178ce219fffbf41791330524eedc1/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index a43b2257bfa21b1960fe9e22298392d420e81335..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_17:26:41_0daeeb40a10178ce219fffbf41791330524eedc1/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.1488,0.00362,276.0 diff --git a/raw_results/2023-08-29_17:26:41_0daeeb40a10178ce219fffbf41791330524eedc1/pytorch_bert_inference/0/main.log b/raw_results/2023-08-29_17:26:41_0daeeb40a10178ce219fffbf41791330524eedc1/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
133bb4038f0bd66bb804b4c197e20d3d859f689d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_17:26:41_0daeeb40a10178ce219fffbf41791330524eedc1/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-29 18:51:44,814][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 18:51:44,815][benchmark][INFO] - + Setting seed(42) -[2023-08-29 18:51:46,184][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-29 18:51:46,184][backend][INFO] - Configuring pytorch backend -[2023-08-29 18:51:46,184][backend][INFO] - + Checking initial device isolation -[2023-08-29 18:51:46,184][backend][INFO] - + Checking contineous device isolation -[2023-08-29 18:51:46,184][pytorch][INFO] - + Disabling gradients -[2023-08-29 18:51:46,185][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 18:51:46,924][pytorch][INFO] - + Turning on eval mode -[2023-08-29 18:51:46,925][inference][INFO] - Running inference benchmark -[2023-08-29 18:51:47,056][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 18:51:47,057][inference][INFO] - + Tracking forward pass peak memory -[2023-08-29 18:51:47,116][inference][INFO] - + Forward pass peak memory: 467.1488 (MB) -[2023-08-29 18:51:47,118][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 18:51:47,119][inference][INFO] - + Warming up the forward pass -[2023-08-29 18:51:47,151][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 18:51:52,199][inference][INFO] - + Forward pass latency: 3.62e-03 (s) -[2023-08-29 18:51:52,200][inference][INFO] - + Forward pass throughput: 276.00 (samples/s) -[2023-08-29 18:51:52,201][inference][INFO] - Saving inference results -[2023-08-29 18:51:52,211][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_17:26:41_0daeeb40a10178ce219fffbf41791330524eedc1/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-29_17:26:41_0daeeb40a10178ce219fffbf41791330524eedc1/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_17:26:41_0daeeb40a10178ce219fffbf41791330524eedc1/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_17:26:41_0daeeb40a10178ce219fffbf41791330524eedc1/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-29_17:26:41_0daeeb40a10178ce219fffbf41791330524eedc1/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 2baf70eb7695db5e5daed5bb49ed460708d823ce..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_17:26:41_0daeeb40a10178ce219fffbf41791330524eedc1/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_17:26:41_0daeeb40a10178ce219fffbf41791330524eedc1/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-29_17:26:41_0daeeb40a10178ce219fffbf41791330524eedc1/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-29_17:26:41_0daeeb40a10178ce219fffbf41791330524eedc1/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_17:26:41_0daeeb40a10178ce219fffbf41791330524eedc1/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-29_17:26:41_0daeeb40a10178ce219fffbf41791330524eedc1/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-29_17:26:41_0daeeb40a10178ce219fffbf41791330524eedc1/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_17:26:41_0daeeb40a10178ce219fffbf41791330524eedc1/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
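
The `.config/overrides.yaml` files above hold Hydra's per-job override lists (here `benchmark.input_shapes.batch_size=4` for job 1), and the `hydra_config.yaml` alongside them is effectively the base config with that dot-list merged in. A minimal sketch of the merge semantics, using illustrative values only:

```python
from omegaconf import OmegaConf

# Base config fragment (values as in the deleted config.yaml files).
base = OmegaConf.create(
    {"benchmark": {"input_shapes": {"batch_size": 1, "sequence_length": 16}}}
)

# Hydra stores each job's overrides as a dot-list; merging reproduces
# the per-job resolved config recorded in hydra_config.yaml.
overrides = OmegaConf.from_dotlist(["benchmark.input_shapes.batch_size=4"])
merged = OmegaConf.merge(base, overrides)

assert merged.benchmark.input_shapes.batch_size == 4
assert merged.benchmark.input_shapes.sequence_length == 16
```
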
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_17:26:41_0daeeb40a10178ce219fffbf41791330524eedc1/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-29_17:26:41_0daeeb40a10178ce219fffbf41791330524eedc1/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index fcaf257f87bda080d64758e833484b4f2f6c8393..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_17:26:41_0daeeb40a10178ce219fffbf41791330524eedc1/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.13593599999996,0.00416,962.0 diff --git a/raw_results/2023-08-29_17:26:41_0daeeb40a10178ce219fffbf41791330524eedc1/pytorch_bert_inference/1/main.log b/raw_results/2023-08-29_17:26:41_0daeeb40a10178ce219fffbf41791330524eedc1/pytorch_bert_inference/1/main.log deleted file mode 100644 index caed741a42e80055d31e21324eab955cacf24083..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_17:26:41_0daeeb40a10178ce219fffbf41791330524eedc1/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-29 18:51:52,593][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 18:51:52,594][benchmark][INFO] - + Setting seed(42) -[2023-08-29 18:51:53,081][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-29 18:51:53,081][backend][INFO] - Configuring pytorch backend -[2023-08-29 18:51:53,082][backend][INFO] - + Checking initial device isolation -[2023-08-29 18:51:53,082][backend][INFO] - + Checking contineous device isolation -[2023-08-29 18:51:53,082][pytorch][INFO] - + Disabling gradients -[2023-08-29 18:51:53,082][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 18:51:53,222][pytorch][INFO] - + Turning on eval mode -[2023-08-29 18:51:53,222][inference][INFO] - Running inference benchmark -[2023-08-29 18:51:53,351][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 18:51:53,353][inference][INFO] - + Tracking forward 
pass peak memory -[2023-08-29 18:51:53,397][inference][INFO] - + Forward pass peak memory: 468.13593599999996 (MB) -[2023-08-29 18:51:53,398][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 18:51:53,400][inference][INFO] - + Warming up the forward pass -[2023-08-29 18:51:53,443][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 18:51:58,484][inference][INFO] - + Forward pass latency: 4.16e-03 (s) -[2023-08-29 18:51:58,485][inference][INFO] - + Forward pass throughput: 962.00 (samples/s) -[2023-08-29 18:51:58,485][inference][INFO] - Saving inference results -[2023-08-29 18:51:58,493][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_17:26:41_0daeeb40a10178ce219fffbf41791330524eedc1/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-29_17:26:41_0daeeb40a10178ce219fffbf41791330524eedc1/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_17:26:41_0daeeb40a10178ce219fffbf41791330524eedc1/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_17:26:41_0daeeb40a10178ce219fffbf41791330524eedc1/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-29_17:26:41_0daeeb40a10178ce219fffbf41791330524eedc1/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index cd0cf75f042816b7ebc6e8d319b5ba50c4563d0a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_17:26:41_0daeeb40a10178ce219fffbf41791330524eedc1/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - 
launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_17:26:41_0daeeb40a10178ce219fffbf41791330524eedc1/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-29_17:26:41_0daeeb40a10178ce219fffbf41791330524eedc1/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-29_17:26:41_0daeeb40a10178ce219fffbf41791330524eedc1/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_17:26:41_0daeeb40a10178ce219fffbf41791330524eedc1/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-29_17:26:41_0daeeb40a10178ce219fffbf41791330524eedc1/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-29_17:26:41_0daeeb40a10178ce219fffbf41791330524eedc1/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_17:26:41_0daeeb40a10178ce219fffbf41791330524eedc1/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_17:26:41_0daeeb40a10178ce219fffbf41791330524eedc1/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-29_17:26:41_0daeeb40a10178ce219fffbf41791330524eedc1/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 709c06eee74f0854c5a12a5ec1d377ce42904fd3..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_17:26:41_0daeeb40a10178ce219fffbf41791330524eedc1/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.21318399999996,0.00391,256.0,0.533,188.0 diff --git a/raw_results/2023-08-29_17:26:41_0daeeb40a10178ce219fffbf41791330524eedc1/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-29_17:26:41_0daeeb40a10178ce219fffbf41791330524eedc1/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 84655b9ad62e881e2761349e778e50b91a42aa39..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-29_17:26:41_0daeeb40a10178ce219fffbf41791330524eedc1/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-29 18:52:03,495][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 18:52:03,496][benchmark][INFO] - + Setting seed(42) -[2023-08-29 18:52:05,049][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-29 18:52:05,050][backend][INFO] - Configuring pytorch backend -[2023-08-29 18:52:05,050][backend][INFO] - + Checking initial device isolation -[2023-08-29 18:52:05,050][backend][INFO] - + Checking contineous device isolation -[2023-08-29 18:52:05,050][pytorch][INFO] - + Disabling gradients -[2023-08-29 18:52:05,050][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 18:52:05,702][pytorch][INFO] - + Turning on eval mode -[2023-08-29 18:52:05,703][inference][INFO] - Running inference benchmark -[2023-08-29 18:52:05,904][inference][INFO] - + Tracking forward pass peak memory -[2023-08-29 18:52:05,951][inference][INFO] - + Forward pass peak memory: 469.21318399999996 (MB) -[2023-08-29 18:52:05,952][inference][INFO] - + Warming up the forward pass -[2023-08-29 18:52:05,985][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 18:52:11,030][inference][INFO] - + Forward pass latency: 3.91e-03 (s) -[2023-08-29 18:52:11,032][inference][INFO] - + Forward pass throughput: 256.00 (samples/s) -[2023-08-29 18:52:11,033][inference][INFO] - + Warming up the generation pass -[2023-08-29 18:52:11,623][inference][INFO] - + Tracking generation latency and throughput -[2023-08-29 18:52:16,957][inference][INFO] - + Generation pass latency: 5.33e-01 (s) -[2023-08-29 18:52:16,958][inference][INFO] - + Generation pass throughput: 188.00 (tokens/s) -[2023-08-29 18:52:16,958][inference][INFO] - Saving inference results -[2023-08-29 18:52:16,969][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_17:49:15_ce2d4bc6a1be4f3eb2dc3d1bd564a0892665b2c7/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-29_17:49:15_ce2d4bc6a1be4f3eb2dc3d1bd564a0892665b2c7/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_17:49:15_ce2d4bc6a1be4f3eb2dc3d1bd564a0892665b2c7/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
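(Editorial aside.) The inference_results.csv columns in these runs are internally consistent: forward throughput appears to be batch_size divided by the mean forward latency, and generation throughput appears to be new_tokens × batch_size divided by the generation latency. A quick check against the pytorch_gpt2_inference/0 row above (batch_size 1, new_tokens 100), using only values already shown in this diff; the small residuals come from the CSV latencies being rounded to three significant figures:

```python
# Sanity check of the CSV columns for pytorch_gpt2_inference/0.
# Latencies are rounded in the CSV, hence the small residuals.
batch_size = 1                # benchmark.input_shapes.batch_size for this job
new_tokens = 100              # benchmark.new_tokens
forward_latency_s = 0.00391   # forward.latency(s)
generate_latency_s = 0.533    # generate.latency(s)

forward_throughput = batch_size / forward_latency_s                  # ~255.8 vs reported 256.0 samples/s
generate_throughput = new_tokens * batch_size / generate_latency_s   # ~187.6 vs reported 188.0 tokens/s
print(forward_throughput, generate_throughput)
```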
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_17:49:15_ce2d4bc6a1be4f3eb2dc3d1bd564a0892665b2c7/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-29_17:49:15_ce2d4bc6a1be4f3eb2dc3d1bd564a0892665b2c7/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 7b3288a72ff65fa9bf0551b4593582d15af629fe..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_17:49:15_ce2d4bc6a1be4f3eb2dc3d1bd564a0892665b2c7/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
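(Editorial aside.) The sweeper block above (`params: benchmark.input_shapes.batch_size: 1,4`) is what fans the bert experiment out into the numbered job directories 0 and 1 seen throughout this diff, each recording its single override in `.config/overrides.yaml`. A minimal sketch of that expansion; the real work happens inside Hydra's BasicSweeper, this just mirrors its observable output:

```python
# Mirror of how a comma-separated sweep value expands into numbered jobs.
sweep_params = {"benchmark.input_shapes.batch_size": "1,4"}

jobs = []
for key, csv_values in sweep_params.items():
    for num, value in enumerate(csv_values.split(",")):
        # hydra.job.num selects the sweep subdir; the override string is also
        # what ends up in <sweep_dir>/<num>/.config/overrides.yaml.
        jobs.append({"num": num, "override": f"{key}={value}"})

print(jobs)
# [{'num': 0, 'override': 'benchmark.input_shapes.batch_size=1'},
#  {'num': 1, 'override': 'benchmark.input_shapes.batch_size=4'}]
```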
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_17:49:15_ce2d4bc6a1be4f3eb2dc3d1bd564a0892665b2c7/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-29_17:49:15_ce2d4bc6a1be4f3eb2dc3d1bd564a0892665b2c7/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-29_17:49:15_ce2d4bc6a1be4f3eb2dc3d1bd564a0892665b2c7/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_17:49:15_ce2d4bc6a1be4f3eb2dc3d1bd564a0892665b2c7/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-29_17:49:15_ce2d4bc6a1be4f3eb2dc3d1bd564a0892665b2c7/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-29_17:49:15_ce2d4bc6a1be4f3eb2dc3d1bd564a0892665b2c7/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_17:49:15_ce2d4bc6a1be4f3eb2dc3d1bd564a0892665b2c7/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_17:49:15_ce2d4bc6a1be4f3eb2dc3d1bd564a0892665b2c7/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-29_17:49:15_ce2d4bc6a1be4f3eb2dc3d1bd564a0892665b2c7/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 9b72e79aaab9f7b29c300406b731817fc8316b3f..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_17:49:15_ce2d4bc6a1be4f3eb2dc3d1bd564a0892665b2c7/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.812928,0.00369,271.0 diff --git a/raw_results/2023-08-29_17:49:15_ce2d4bc6a1be4f3eb2dc3d1bd564a0892665b2c7/pytorch_bert_inference/0/main.log b/raw_results/2023-08-29_17:49:15_ce2d4bc6a1be4f3eb2dc3d1bd564a0892665b2c7/pytorch_bert_inference/0/main.log deleted file mode 100644 index 73f453d144412199919404b2114e475ee45dc6f9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_17:49:15_ce2d4bc6a1be4f3eb2dc3d1bd564a0892665b2c7/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-29 18:53:25,709][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 18:53:25,710][benchmark][INFO] - + Setting seed(42) -[2023-08-29 18:53:26,990][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-29 18:53:26,991][backend][INFO] - Configuring pytorch backend -[2023-08-29 18:53:26,991][backend][INFO] - + Checking initial device isolation -[2023-08-29 18:53:26,991][backend][INFO] - + Checking contineous device isolation -[2023-08-29 18:53:26,991][pytorch][INFO] - + Disabling gradients -[2023-08-29 18:53:26,992][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 18:53:27,644][pytorch][INFO] - + Turning on eval mode -[2023-08-29 18:53:27,645][inference][INFO] - Running inference benchmark -[2023-08-29 18:53:27,773][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 18:53:27,774][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-29 18:53:27,839][inference][INFO] - + Forward pass peak memory: 466.812928 (MB) -[2023-08-29 18:53:27,840][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 18:53:27,842][inference][INFO] - + Warming up the forward pass -[2023-08-29 18:53:27,878][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 18:53:32,925][inference][INFO] - + Forward pass latency: 3.69e-03 (s) -[2023-08-29 18:53:32,926][inference][INFO] - + Forward pass throughput: 271.00 (samples/s) -[2023-08-29 18:53:32,927][inference][INFO] - Saving inference results -[2023-08-29 18:53:32,940][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_17:49:15_ce2d4bc6a1be4f3eb2dc3d1bd564a0892665b2c7/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-29_17:49:15_ce2d4bc6a1be4f3eb2dc3d1bd564a0892665b2c7/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_17:49:15_ce2d4bc6a1be4f3eb2dc3d1bd564a0892665b2c7/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_17:49:15_ce2d4bc6a1be4f3eb2dc3d1bd564a0892665b2c7/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-29_17:49:15_ce2d4bc6a1be4f3eb2dc3d1bd564a0892665b2c7/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index af41dbc5bbefdd4e95c5d44903f1758ecf6a91a9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_17:49:15_ce2d4bc6a1be4f3eb2dc3d1bd564a0892665b2c7/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_17:49:15_ce2d4bc6a1be4f3eb2dc3d1bd564a0892665b2c7/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-29_17:49:15_ce2d4bc6a1be4f3eb2dc3d1bd564a0892665b2c7/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-08-29_17:49:15_ce2d4bc6a1be4f3eb2dc3d1bd564a0892665b2c7/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_17:49:15_ce2d4bc6a1be4f3eb2dc3d1bd564a0892665b2c7/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-29_17:49:15_ce2d4bc6a1be4f3eb2dc3d1bd564a0892665b2c7/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-29_17:49:15_ce2d4bc6a1be4f3eb2dc3d1bd564a0892665b2c7/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_17:49:15_ce2d4bc6a1be4f3eb2dc3d1bd564a0892665b2c7/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_17:49:15_ce2d4bc6a1be4f3eb2dc3d1bd564a0892665b2c7/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-29_17:49:15_ce2d4bc6a1be4f3eb2dc3d1bd564a0892665b2c7/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 726cec4bda0af76442be414953d83f5d5df01fe6..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_17:49:15_ce2d4bc6a1be4f3eb2dc3d1bd564a0892665b2c7/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.79187199999996,0.00414,966.0 diff --git a/raw_results/2023-08-29_17:49:15_ce2d4bc6a1be4f3eb2dc3d1bd564a0892665b2c7/pytorch_bert_inference/1/main.log b/raw_results/2023-08-29_17:49:15_ce2d4bc6a1be4f3eb2dc3d1bd564a0892665b2c7/pytorch_bert_inference/1/main.log deleted file mode 100644 index b05604eb4e893cd301959af5f7bf6ecc64a1a05c..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-29_17:49:15_ce2d4bc6a1be4f3eb2dc3d1bd564a0892665b2c7/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-29 18:53:33,334][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 18:53:33,335][benchmark][INFO] - + Setting seed(42) -[2023-08-29 18:53:33,815][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-29 18:53:33,815][backend][INFO] - Configuring pytorch backend -[2023-08-29 18:53:33,816][backend][INFO] - + Checking initial device isolation -[2023-08-29 18:53:33,816][backend][INFO] - + Checking contineous device isolation -[2023-08-29 18:53:33,816][pytorch][INFO] - + Disabling gradients -[2023-08-29 18:53:33,816][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 18:53:33,943][pytorch][INFO] - + Turning on eval mode -[2023-08-29 18:53:33,944][inference][INFO] - Running inference benchmark -[2023-08-29 18:53:34,076][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 18:53:34,077][inference][INFO] - + Tracking forward pass peak memory -[2023-08-29 18:53:34,122][inference][INFO] - + Forward pass peak memory: 467.79187199999996 (MB) -[2023-08-29 18:53:34,123][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 18:53:34,125][inference][INFO] - + Warming up the forward pass -[2023-08-29 18:53:34,168][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 18:53:39,210][inference][INFO] - + Forward pass latency: 4.14e-03 (s) -[2023-08-29 18:53:39,211][inference][INFO] - + Forward pass throughput: 966.00 (samples/s) -[2023-08-29 18:53:39,211][inference][INFO] - Saving inference results -[2023-08-29 18:53:39,221][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_17:49:15_ce2d4bc6a1be4f3eb2dc3d1bd564a0892665b2c7/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-29_17:49:15_ce2d4bc6a1be4f3eb2dc3d1bd564a0892665b2c7/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_17:49:15_ce2d4bc6a1be4f3eb2dc3d1bd564a0892665b2c7/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference 
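(Editorial aside.) The "Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids']" log lines above refer to synthetic tensors shaped by `benchmark.input_shapes`; for text models only batch_size and sequence_length are consumed. An illustrative stand-in, not the library's actual generator, with vocab_size assumed (a real run would read it from the model config):

```python
import torch

batch_size, sequence_length = 4, 16  # from benchmark.input_shapes for job 1
vocab_size = 1000                    # assumed for illustration

dummy_input = {
    "input_ids": torch.randint(0, vocab_size, (batch_size, sequence_length)),
    "attention_mask": torch.ones(batch_size, sequence_length, dtype=torch.long),
    "token_type_ids": torch.zeros(batch_size, sequence_length, dtype=torch.long),
}
```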
-model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_17:49:15_ce2d4bc6a1be4f3eb2dc3d1bd564a0892665b2c7/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-29_17:49:15_ce2d4bc6a1be4f3eb2dc3d1bd564a0892665b2c7/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 69a0be310edaaa2679b195bc13ff09b2f18a9bc9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_17:49:15_ce2d4bc6a1be4f3eb2dc3d1bd564a0892665b2c7/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_17:49:15_ce2d4bc6a1be4f3eb2dc3d1bd564a0892665b2c7/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-29_17:49:15_ce2d4bc6a1be4f3eb2dc3d1bd564a0892665b2c7/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-29_17:49:15_ce2d4bc6a1be4f3eb2dc3d1bd564a0892665b2c7/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_17:49:15_ce2d4bc6a1be4f3eb2dc3d1bd564a0892665b2c7/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-29_17:49:15_ce2d4bc6a1be4f3eb2dc3d1bd564a0892665b2c7/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-29_17:49:15_ce2d4bc6a1be4f3eb2dc3d1bd564a0892665b2c7/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_17:49:15_ce2d4bc6a1be4f3eb2dc3d1bd564a0892665b2c7/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
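(Editorial aside.) `inter_op_num_threads` and `intra_op_num_threads` in the backend config above are null in every run here, i.e. PyTorch's default thread pools are kept. When set, they would plausibly be applied through PyTorch's global thread-pool setters; a sketch under that assumption:

```python
import torch

inter_op_num_threads = None  # backend.inter_op_num_threads
intra_op_num_threads = None  # backend.intra_op_num_threads

# null in the config -> leave PyTorch's defaults untouched.
if intra_op_num_threads is not None:
    torch.set_num_threads(intra_op_num_threads)            # intra-op parallelism
if inter_op_num_threads is not None:
    torch.set_num_interop_threads(inter_op_num_threads)    # inter-op parallelism
```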
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_17:49:15_ce2d4bc6a1be4f3eb2dc3d1bd564a0892665b2c7/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-29_17:49:15_ce2d4bc6a1be4f3eb2dc3d1bd564a0892665b2c7/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index f39759bd5b81bae9beafc0fee50a072013d6c176..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_17:49:15_ce2d4bc6a1be4f3eb2dc3d1bd564a0892665b2c7/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.372928,0.00383,261.0,0.48,208.0 diff --git a/raw_results/2023-08-29_17:49:15_ce2d4bc6a1be4f3eb2dc3d1bd564a0892665b2c7/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-29_17:49:15_ce2d4bc6a1be4f3eb2dc3d1bd564a0892665b2c7/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 516eb24564929e542e762120b5c8a40042324569..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_17:49:15_ce2d4bc6a1be4f3eb2dc3d1bd564a0892665b2c7/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-29 18:53:44,037][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 18:53:44,038][benchmark][INFO] - + Setting seed(42) -[2023-08-29 18:53:45,498][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-29 18:53:45,499][backend][INFO] - Configuring pytorch backend -[2023-08-29 18:53:45,499][backend][INFO] - + Checking initial device isolation -[2023-08-29 18:53:45,499][backend][INFO] - + Checking contineous device isolation -[2023-08-29 18:53:45,499][pytorch][INFO] - + Disabling gradients -[2023-08-29 18:53:45,500][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 18:53:46,162][pytorch][INFO] - + Turning on eval mode -[2023-08-29 18:53:46,163][inference][INFO] - Running inference benchmark -[2023-08-29 18:53:46,367][inference][INFO] - + Tracking forward pass peak memory -[2023-08-29 18:53:46,410][inference][INFO] - + Forward pass peak memory: 469.372928 (MB) -[2023-08-29 18:53:46,412][inference][INFO] - + Warming up the forward pass 
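(Editorial aside.) The "Disabling gradients" and "Turning on eval mode" steps in the log above correspond to `disable_grad: true` and `eval_mode: true` in the resolved config, i.e. standard PyTorch inference hygiene. A minimal equivalent using the model id from this experiment (the library's exact code is not shown in this diff):

```python
import torch
from transformers import AutoModelForCausalLM

torch.set_grad_enabled(False)  # disable_grad: true -> skip autograd bookkeeping

model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model.eval()                   # eval_mode: true -> dropout/batch-norm in eval behavior
```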
-[2023-08-29 18:53:46,443][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 18:53:51,490][inference][INFO] - + Forward pass latency: 3.83e-03 (s) -[2023-08-29 18:53:51,493][inference][INFO] - + Forward pass throughput: 261.00 (samples/s) -[2023-08-29 18:53:51,494][inference][INFO] - + Warming up the generation pass -[2023-08-29 18:53:52,021][inference][INFO] - + Tracking generation latency and throughput -[2023-08-29 18:53:57,307][inference][INFO] - + Generation pass latency: 4.80e-01 (s) -[2023-08-29 18:53:57,308][inference][INFO] - + Generation pass throughput: 208.00 (tokens/s) -[2023-08-29 18:53:57,308][inference][INFO] - Saving inference results -[2023-08-29 18:53:57,325][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_18:24:24_dbc16f4404eca4a75459683d5135f6accea35a02/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-29_18:24:24_dbc16f4404eca4a75459683d5135f6accea35a02/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_18:24:24_dbc16f4404eca4a75459683d5135f6accea35a02/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_18:24:24_dbc16f4404eca4a75459683d5135f6accea35a02/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-29_18:24:24_dbc16f4404eca4a75459683d5135f6accea35a02/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 4da94b7e9cee5e6a4335a042cebda6174006b057..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_18:24:24_dbc16f4404eca4a75459683d5135f6accea35a02/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_18:24:24_dbc16f4404eca4a75459683d5135f6accea35a02/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-29_18:24:24_dbc16f4404eca4a75459683d5135f6accea35a02/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-29_18:24:24_dbc16f4404eca4a75459683d5135f6accea35a02/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_18:24:24_dbc16f4404eca4a75459683d5135f6accea35a02/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-29_18:24:24_dbc16f4404eca4a75459683d5135f6accea35a02/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-29_18:24:24_dbc16f4404eca4a75459683d5135f6accea35a02/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_18:24:24_dbc16f4404eca4a75459683d5135f6accea35a02/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_18:24:24_dbc16f4404eca4a75459683d5135f6accea35a02/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-29_18:24:24_dbc16f4404eca4a75459683d5135f6accea35a02/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index aca804aa516a8d53bc68bc7417e914d10a893d91..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_18:24:24_dbc16f4404eca4a75459683d5135f6accea35a02/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.45599999999996,0.00322,311.0 diff --git a/raw_results/2023-08-29_18:24:24_dbc16f4404eca4a75459683d5135f6accea35a02/pytorch_bert_inference/0/main.log b/raw_results/2023-08-29_18:24:24_dbc16f4404eca4a75459683d5135f6accea35a02/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
33d522ec7d683a91589e62c6d68b48cb74b5e685..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_18:24:24_dbc16f4404eca4a75459683d5135f6accea35a02/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-29 18:55:06,161][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 18:55:06,162][benchmark][INFO] - + Setting seed(42) -[2023-08-29 18:55:07,475][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-29 18:55:07,475][backend][INFO] - Configuring pytorch backend -[2023-08-29 18:55:07,476][backend][INFO] - + Checking initial device isolation -[2023-08-29 18:55:07,476][backend][INFO] - + Checking contineous device isolation -[2023-08-29 18:55:07,476][pytorch][INFO] - + Disabling gradients -[2023-08-29 18:55:07,476][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 18:55:08,104][pytorch][INFO] - + Turning on eval mode -[2023-08-29 18:55:08,104][inference][INFO] - Running inference benchmark -[2023-08-29 18:55:08,232][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 18:55:08,234][inference][INFO] - + Tracking forward pass peak memory -[2023-08-29 18:55:08,296][inference][INFO] - + Forward pass peak memory: 467.45599999999996 (MB) -[2023-08-29 18:55:08,298][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 18:55:08,299][inference][INFO] - + Warming up the forward pass -[2023-08-29 18:55:08,337][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 18:55:13,391][inference][INFO] - + Forward pass latency: 3.22e-03 (s) -[2023-08-29 18:55:13,392][inference][INFO] - + Forward pass throughput: 311.00 (samples/s) -[2023-08-29 18:55:13,393][inference][INFO] - Saving inference results -[2023-08-29 18:55:13,408][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_18:24:24_dbc16f4404eca4a75459683d5135f6accea35a02/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-29_18:24:24_dbc16f4404eca4a75459683d5135f6accea35a02/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_18:24:24_dbc16f4404eca4a75459683d5135f6accea35a02/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
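(Editorial aside.) The "Forward pass peak memory" figures above are in decimal megabytes (bytes / 1e6). On CPU, peak usage can be approximated by sampling the process RSS while the tracked call runs; a rough sketch of that idea only, not the library's actual tracker, with psutil assumed available:

```python
import os
import threading
import time

import psutil  # assumed available; used only to illustrate the idea


def track_peak_rss_mb(fn, interval=0.01):
    """Run fn() while sampling this process's RSS; return the peak in MB."""
    proc = psutil.Process(os.getpid())
    peak = 0
    stop = threading.Event()

    def sampler():
        nonlocal peak
        while not stop.is_set():
            peak = max(peak, proc.memory_info().rss)
            time.sleep(interval)

    t = threading.Thread(target=sampler)
    t.start()
    try:
        fn()
    finally:
        stop.set()
        t.join()
    return peak / 1e6  # decimal MB, matching the CSV's convention
```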
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_18:24:24_dbc16f4404eca4a75459683d5135f6accea35a02/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-29_18:24:24_dbc16f4404eca4a75459683d5135f6accea35a02/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 1560828382cb95ad24e023f3c27bbe3cba3ab3e8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_18:24:24_dbc16f4404eca4a75459683d5135f6accea35a02/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_18:24:24_dbc16f4404eca4a75459683d5135f6accea35a02/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-29_18:24:24_dbc16f4404eca4a75459683d5135f6accea35a02/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-29_18:24:24_dbc16f4404eca4a75459683d5135f6accea35a02/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_18:24:24_dbc16f4404eca4a75459683d5135f6accea35a02/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-29_18:24:24_dbc16f4404eca4a75459683d5135f6accea35a02/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-29_18:24:24_dbc16f4404eca4a75459683d5135f6accea35a02/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_18:24:24_dbc16f4404eca4a75459683d5135f6accea35a02/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_18:24:24_dbc16f4404eca4a75459683d5135f6accea35a02/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-29_18:24:24_dbc16f4404eca4a75459683d5135f6accea35a02/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 6a5bff3f088485da286c32d5b0ec87261eae499f..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_18:24:24_dbc16f4404eca4a75459683d5135f6accea35a02/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.402176,0.00357,1120.0 diff --git a/raw_results/2023-08-29_18:24:24_dbc16f4404eca4a75459683d5135f6accea35a02/pytorch_bert_inference/1/main.log b/raw_results/2023-08-29_18:24:24_dbc16f4404eca4a75459683d5135f6accea35a02/pytorch_bert_inference/1/main.log deleted file mode 100644 index a46d9c619c49d50b40bff5b1314c8e9be590e944..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_18:24:24_dbc16f4404eca4a75459683d5135f6accea35a02/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-29 18:55:13,901][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 18:55:13,902][benchmark][INFO] - + Setting seed(42) -[2023-08-29 18:55:14,369][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-29 18:55:14,369][backend][INFO] - Configuring pytorch backend -[2023-08-29 18:55:14,369][backend][INFO] - + Checking initial device isolation -[2023-08-29 18:55:14,370][backend][INFO] - + Checking contineous device isolation -[2023-08-29 18:55:14,370][pytorch][INFO] - + Disabling gradients -[2023-08-29 18:55:14,370][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 18:55:14,491][pytorch][INFO] - + Turning on eval mode -[2023-08-29 18:55:14,492][inference][INFO] - Running inference benchmark -[2023-08-29 18:55:14,619][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 18:55:14,620][inference][INFO] - + Tracking forward pass 
peak memory -[2023-08-29 18:55:14,661][inference][INFO] - + Forward pass peak memory: 468.402176 (MB) -[2023-08-29 18:55:14,661][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 18:55:14,663][inference][INFO] - + Warming up the forward pass -[2023-08-29 18:55:14,699][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 18:55:19,743][inference][INFO] - + Forward pass latency: 3.57e-03 (s) -[2023-08-29 18:55:19,744][inference][INFO] - + Forward pass throughput: 1120.00 (samples/s) -[2023-08-29 18:55:19,744][inference][INFO] - Saving inference results -[2023-08-29 18:55:19,752][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_18:24:24_dbc16f4404eca4a75459683d5135f6accea35a02/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-29_18:24:24_dbc16f4404eca4a75459683d5135f6accea35a02/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_18:24:24_dbc16f4404eca4a75459683d5135f6accea35a02/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_18:24:24_dbc16f4404eca4a75459683d5135f6accea35a02/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-29_18:24:24_dbc16f4404eca4a75459683d5135f6accea35a02/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 583729acac81939048cb754ce9c5febec67faf93..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_18:24:24_dbc16f4404eca4a75459683d5135f6accea35a02/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_18:24:24_dbc16f4404eca4a75459683d5135f6accea35a02/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-29_18:24:24_dbc16f4404eca4a75459683d5135f6accea35a02/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-29_18:24:24_dbc16f4404eca4a75459683d5135f6accea35a02/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_18:24:24_dbc16f4404eca4a75459683d5135f6accea35a02/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-29_18:24:24_dbc16f4404eca4a75459683d5135f6accea35a02/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-29_18:24:24_dbc16f4404eca4a75459683d5135f6accea35a02/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_18:24:24_dbc16f4404eca4a75459683d5135f6accea35a02/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_18:24:24_dbc16f4404eca4a75459683d5135f6accea35a02/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-29_18:24:24_dbc16f4404eca4a75459683d5135f6accea35a02/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index c9ea9cc681dc0212f1c3c44b4ef1fdd4babcc51b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_18:24:24_dbc16f4404eca4a75459683d5135f6accea35a02/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.123072,0.00311,322.0,0.481,208.0 diff --git a/raw_results/2023-08-29_18:24:24_dbc16f4404eca4a75459683d5135f6accea35a02/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-29_18:24:24_dbc16f4404eca4a75459683d5135f6accea35a02/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 716f9df262eb340a1fa80b6ae2ff37f2c1bef3f1..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-29_18:24:24_dbc16f4404eca4a75459683d5135f6accea35a02/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-29 18:55:24,512][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 18:55:24,513][benchmark][INFO] - + Setting seed(42) -[2023-08-29 18:55:26,084][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-29 18:55:26,085][backend][INFO] - Configuring pytorch backend -[2023-08-29 18:55:26,085][backend][INFO] - + Checking initial device isolation -[2023-08-29 18:55:26,085][backend][INFO] - + Checking contineous device isolation -[2023-08-29 18:55:26,085][pytorch][INFO] - + Disabling gradients -[2023-08-29 18:55:26,085][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 18:55:26,728][pytorch][INFO] - + Turning on eval mode -[2023-08-29 18:55:26,729][inference][INFO] - Running inference benchmark -[2023-08-29 18:55:26,996][inference][INFO] - + Tracking forward pass peak memory -[2023-08-29 18:55:27,045][inference][INFO] - + Forward pass peak memory: 469.123072 (MB) -[2023-08-29 18:55:27,047][inference][INFO] - + Warming up the forward pass -[2023-08-29 18:55:27,080][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 18:55:32,132][inference][INFO] - + Forward pass latency: 3.11e-03 (s) -[2023-08-29 18:55:32,133][inference][INFO] - + Forward pass throughput: 322.00 (samples/s) -[2023-08-29 18:55:32,134][inference][INFO] - + Warming up the generation pass -[2023-08-29 18:55:32,622][inference][INFO] - + Tracking generation latency and throughput -[2023-08-29 18:55:37,920][inference][INFO] - + Generation pass latency: 4.81e-01 (s) -[2023-08-29 18:55:37,921][inference][INFO] - + Generation pass throughput: 208.00 (tokens/s) -[2023-08-29 18:55:37,921][inference][INFO] - Saving inference results -[2023-08-29 18:55:37,938][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_19:02:57_8c75cfdaeeb9ae960cfdb0ba780d35add282b2df/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-29_19:02:57_8c75cfdaeeb9ae960cfdb0ba780d35add282b2df/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_19:02:57_8c75cfdaeeb9ae960cfdb0ba780d35add282b2df/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_19:02:57_8c75cfdaeeb9ae960cfdb0ba780d35add282b2df/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-29_19:02:57_8c75cfdaeeb9ae960cfdb0ba780d35add282b2df/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index e726d8d5ba37449abb0325daa9e9d169c8079ddd..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_19:02:57_8c75cfdaeeb9ae960cfdb0ba780d35add282b2df/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_19:02:57_8c75cfdaeeb9ae960cfdb0ba780d35add282b2df/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-29_19:02:57_8c75cfdaeeb9ae960cfdb0ba780d35add282b2df/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-29_19:02:57_8c75cfdaeeb9ae960cfdb0ba780d35add282b2df/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_19:02:57_8c75cfdaeeb9ae960cfdb0ba780d35add282b2df/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-29_19:02:57_8c75cfdaeeb9ae960cfdb0ba780d35add282b2df/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-29_19:02:57_8c75cfdaeeb9ae960cfdb0ba780d35add282b2df/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_19:02:57_8c75cfdaeeb9ae960cfdb0ba780d35add282b2df/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_19:02:57_8c75cfdaeeb9ae960cfdb0ba780d35add282b2df/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-29_19:02:57_8c75cfdaeeb9ae960cfdb0ba780d35add282b2df/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index a5ae9e3905cab6837e6f856e30baf31f1e6c530e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_19:02:57_8c75cfdaeeb9ae960cfdb0ba780d35add282b2df/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.90304,0.00363,275.0 diff --git a/raw_results/2023-08-29_19:02:57_8c75cfdaeeb9ae960cfdb0ba780d35add282b2df/pytorch_bert_inference/0/main.log b/raw_results/2023-08-29_19:02:57_8c75cfdaeeb9ae960cfdb0ba780d35add282b2df/pytorch_bert_inference/0/main.log deleted file mode 100644 index c73088b28ac9c4c50e2e88dd35d859c237fc9b6d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_19:02:57_8c75cfdaeeb9ae960cfdb0ba780d35add282b2df/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-29 20:50:02,381][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 20:50:02,383][benchmark][INFO] - + Setting seed(42) -[2023-08-29 20:50:03,677][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-29 20:50:03,678][backend][INFO] - Configuring pytorch backend -[2023-08-29 20:50:03,678][backend][INFO] - + Checking initial device isolation -[2023-08-29 20:50:03,678][backend][INFO] - + Checking contineous device isolation -[2023-08-29 20:50:03,678][pytorch][INFO] - + Disabling gradients -[2023-08-29 20:50:03,678][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 20:50:04,312][pytorch][INFO] - + Turning on eval mode -[2023-08-29 20:50:04,313][inference][INFO] - Running inference benchmark -[2023-08-29 20:50:04,443][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 20:50:04,444][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-29 20:50:04,512][inference][INFO] - + Forward pass peak memory: 466.90304 (MB) -[2023-08-29 20:50:04,513][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 20:50:04,515][inference][INFO] - + Warming up the forward pass -[2023-08-29 20:50:04,547][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 20:50:09,592][inference][INFO] - + Forward pass latency: 3.63e-03 (s) -[2023-08-29 20:50:09,593][inference][INFO] - + Forward pass throughput: 275.00 (samples/s) -[2023-08-29 20:50:09,594][inference][INFO] - Saving inference results -[2023-08-29 20:50:09,604][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_19:02:57_8c75cfdaeeb9ae960cfdb0ba780d35add282b2df/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-29_19:02:57_8c75cfdaeeb9ae960cfdb0ba780d35add282b2df/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_19:02:57_8c75cfdaeeb9ae960cfdb0ba780d35add282b2df/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_19:02:57_8c75cfdaeeb9ae960cfdb0ba780d35add282b2df/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-29_19:02:57_8c75cfdaeeb9ae960cfdb0ba780d35add282b2df/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index e7d791a7a520180989a7d595ba019f105604e3bb..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_19:02:57_8c75cfdaeeb9ae960cfdb0ba780d35add282b2df/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_19:02:57_8c75cfdaeeb9ae960cfdb0ba780d35add282b2df/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-29_19:02:57_8c75cfdaeeb9ae960cfdb0ba780d35add282b2df/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-08-29_19:02:57_8c75cfdaeeb9ae960cfdb0ba780d35add282b2df/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_19:02:57_8c75cfdaeeb9ae960cfdb0ba780d35add282b2df/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-29_19:02:57_8c75cfdaeeb9ae960cfdb0ba780d35add282b2df/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-29_19:02:57_8c75cfdaeeb9ae960cfdb0ba780d35add282b2df/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_19:02:57_8c75cfdaeeb9ae960cfdb0ba780d35add282b2df/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_19:02:57_8c75cfdaeeb9ae960cfdb0ba780d35add282b2df/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-29_19:02:57_8c75cfdaeeb9ae960cfdb0ba780d35add282b2df/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 3bed88376fe871239b69420cb78240760b94970f..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_19:02:57_8c75cfdaeeb9ae960cfdb0ba780d35add282b2df/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.894272,0.00411,973.0 diff --git a/raw_results/2023-08-29_19:02:57_8c75cfdaeeb9ae960cfdb0ba780d35add282b2df/pytorch_bert_inference/1/main.log b/raw_results/2023-08-29_19:02:57_8c75cfdaeeb9ae960cfdb0ba780d35add282b2df/pytorch_bert_inference/1/main.log deleted file mode 100644 index dcc614af63f93ef8449947cb0fb8bd9030127d2b..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-29_19:02:57_8c75cfdaeeb9ae960cfdb0ba780d35add282b2df/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-29 20:50:09,986][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 20:50:09,988][benchmark][INFO] - + Setting seed(42) -[2023-08-29 20:50:10,473][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-29 20:50:10,473][backend][INFO] - Configuring pytorch backend -[2023-08-29 20:50:10,473][backend][INFO] - + Checking initial device isolation -[2023-08-29 20:50:10,474][backend][INFO] - + Checking contineous device isolation -[2023-08-29 20:50:10,474][pytorch][INFO] - + Disabling gradients -[2023-08-29 20:50:10,474][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 20:50:10,598][pytorch][INFO] - + Turning on eval mode -[2023-08-29 20:50:10,599][inference][INFO] - Running inference benchmark -[2023-08-29 20:50:10,728][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 20:50:10,730][inference][INFO] - + Tracking forward pass peak memory -[2023-08-29 20:50:10,778][inference][INFO] - + Forward pass peak memory: 467.894272 (MB) -[2023-08-29 20:50:10,779][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 20:50:10,781][inference][INFO] - + Warming up the forward pass -[2023-08-29 20:50:10,823][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 20:50:15,862][inference][INFO] - + Forward pass latency: 4.11e-03 (s) -[2023-08-29 20:50:15,863][inference][INFO] - + Forward pass throughput: 973.00 (samples/s) -[2023-08-29 20:50:15,863][inference][INFO] - Saving inference results -[2023-08-29 20:50:15,871][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_19:02:57_8c75cfdaeeb9ae960cfdb0ba780d35add282b2df/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-29_19:02:57_8c75cfdaeeb9ae960cfdb0ba780d35add282b2df/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_19:02:57_8c75cfdaeeb9ae960cfdb0ba780d35add282b2df/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: 
hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_19:02:57_8c75cfdaeeb9ae960cfdb0ba780d35add282b2df/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-29_19:02:57_8c75cfdaeeb9ae960cfdb0ba780d35add282b2df/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 60db644a67b9f20905391df2e10b8c15446a6bf8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_19:02:57_8c75cfdaeeb9ae960cfdb0ba780d35add282b2df/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_19:02:57_8c75cfdaeeb9ae960cfdb0ba780d35add282b2df/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-29_19:02:57_8c75cfdaeeb9ae960cfdb0ba780d35add282b2df/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-29_19:02:57_8c75cfdaeeb9ae960cfdb0ba780d35add282b2df/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_19:02:57_8c75cfdaeeb9ae960cfdb0ba780d35add282b2df/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-29_19:02:57_8c75cfdaeeb9ae960cfdb0ba780d35add282b2df/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-29_19:02:57_8c75cfdaeeb9ae960cfdb0ba780d35add282b2df/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_19:02:57_8c75cfdaeeb9ae960cfdb0ba780d35add282b2df/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_19:02:57_8c75cfdaeeb9ae960cfdb0ba780d35add282b2df/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-29_19:02:57_8c75cfdaeeb9ae960cfdb0ba780d35add282b2df/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 17523861204ea346af13bb70a37aeda1b9ce09b6..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_19:02:57_8c75cfdaeeb9ae960cfdb0ba780d35add282b2df/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.04115199999995,0.00327,306.0,0.48,208.0 diff --git a/raw_results/2023-08-29_19:02:57_8c75cfdaeeb9ae960cfdb0ba780d35add282b2df/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-29_19:02:57_8c75cfdaeeb9ae960cfdb0ba780d35add282b2df/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index b7a4c9e1b47bc586307f37d1aa5f300e7166fe6e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_19:02:57_8c75cfdaeeb9ae960cfdb0ba780d35add282b2df/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-29 20:50:20,687][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 20:50:20,688][benchmark][INFO] - + Setting seed(42) -[2023-08-29 20:50:22,168][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-29 20:50:22,168][backend][INFO] - Configuring pytorch backend -[2023-08-29 20:50:22,168][backend][INFO] - + Checking initial device isolation -[2023-08-29 20:50:22,169][backend][INFO] - + Checking contineous device isolation -[2023-08-29 20:50:22,169][pytorch][INFO] - + Disabling gradients -[2023-08-29 20:50:22,169][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 20:50:22,841][pytorch][INFO] - + Turning on eval mode -[2023-08-29 20:50:22,841][inference][INFO] - Running inference benchmark -[2023-08-29 20:50:23,050][inference][INFO] - + Tracking forward pass peak memory -[2023-08-29 20:50:23,099][inference][INFO] - + Forward pass peak memory: 469.04115199999995 (MB) -[2023-08-29 20:50:23,100][inference][INFO] - + Warming up the 
forward pass -[2023-08-29 20:50:23,133][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 20:50:28,184][inference][INFO] - + Forward pass latency: 3.27e-03 (s) -[2023-08-29 20:50:28,186][inference][INFO] - + Forward pass throughput: 306.00 (samples/s) -[2023-08-29 20:50:28,187][inference][INFO] - + Warming up the generation pass -[2023-08-29 20:50:28,675][inference][INFO] - + Tracking generation latency and throughput -[2023-08-29 20:50:33,955][inference][INFO] - + Generation pass latency: 4.80e-01 (s) -[2023-08-29 20:50:33,956][inference][INFO] - + Generation pass throughput: 208.00 (tokens/s) -[2023-08-29 20:50:33,956][inference][INFO] - Saving inference results -[2023-08-29 20:50:33,969][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_19:10:46_07998ef39926b76d3f6667025535d0859eed61c3/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-29_19:10:46_07998ef39926b76d3f6667025535d0859eed61c3/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_19:10:46_07998ef39926b76d3f6667025535d0859eed61c3/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_19:10:46_07998ef39926b76d3f6667025535d0859eed61c3/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-29_19:10:46_07998ef39926b76d3f6667025535d0859eed61c3/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 07ee564a0f538c84d51183c349afa612fcfb23d4..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_19:10:46_07998ef39926b76d3f6667025535d0859eed61c3/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_19:10:46_07998ef39926b76d3f6667025535d0859eed61c3/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-29_19:10:46_07998ef39926b76d3f6667025535d0859eed61c3/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-29_19:10:46_07998ef39926b76d3f6667025535d0859eed61c3/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_19:10:46_07998ef39926b76d3f6667025535d0859eed61c3/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-29_19:10:46_07998ef39926b76d3f6667025535d0859eed61c3/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-29_19:10:46_07998ef39926b76d3f6667025535d0859eed61c3/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_19:10:46_07998ef39926b76d3f6667025535d0859eed61c3/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_19:10:46_07998ef39926b76d3f6667025535d0859eed61c3/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-29_19:10:46_07998ef39926b76d3f6667025535d0859eed61c3/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 549b3c7e2a2e7b57d3d41fda237dde126d8bdf5b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_19:10:46_07998ef39926b76d3f6667025535d0859eed61c3/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.83750399999997,0.0031,323.0 diff --git a/raw_results/2023-08-29_19:10:46_07998ef39926b76d3f6667025535d0859eed61c3/pytorch_bert_inference/0/main.log b/raw_results/2023-08-29_19:10:46_07998ef39926b76d3f6667025535d0859eed61c3/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
aa3feb8e3da16f47f4e250c917076607ff63e3e6..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_19:10:46_07998ef39926b76d3f6667025535d0859eed61c3/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-29 20:51:45,712][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 20:51:45,714][benchmark][INFO] - + Setting seed(42) -[2023-08-29 20:51:47,106][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-29 20:51:47,107][backend][INFO] - Configuring pytorch backend -[2023-08-29 20:51:47,107][backend][INFO] - + Checking initial device isolation -[2023-08-29 20:51:47,107][backend][INFO] - + Checking contineous device isolation -[2023-08-29 20:51:47,107][pytorch][INFO] - + Disabling gradients -[2023-08-29 20:51:47,107][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 20:51:47,792][pytorch][INFO] - + Turning on eval mode -[2023-08-29 20:51:47,793][inference][INFO] - Running inference benchmark -[2023-08-29 20:51:47,918][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 20:51:47,920][inference][INFO] - + Tracking forward pass peak memory -[2023-08-29 20:51:47,985][inference][INFO] - + Forward pass peak memory: 466.83750399999997 (MB) -[2023-08-29 20:51:47,986][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 20:51:47,988][inference][INFO] - + Warming up the forward pass -[2023-08-29 20:51:48,024][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 20:51:53,077][inference][INFO] - + Forward pass latency: 3.10e-03 (s) -[2023-08-29 20:51:53,079][inference][INFO] - + Forward pass throughput: 323.00 (samples/s) -[2023-08-29 20:51:53,079][inference][INFO] - Saving inference results -[2023-08-29 20:51:53,089][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_19:10:46_07998ef39926b76d3f6667025535d0859eed61c3/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-29_19:10:46_07998ef39926b76d3f6667025535d0859eed61c3/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_19:10:46_07998ef39926b76d3f6667025535d0859eed61c3/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_19:10:46_07998ef39926b76d3f6667025535d0859eed61c3/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-29_19:10:46_07998ef39926b76d3f6667025535d0859eed61c3/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 3a607820d1d49346f04d7ab24d3d3a9a01912622..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_19:10:46_07998ef39926b76d3f6667025535d0859eed61c3/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_19:10:46_07998ef39926b76d3f6667025535d0859eed61c3/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-29_19:10:46_07998ef39926b76d3f6667025535d0859eed61c3/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-29_19:10:46_07998ef39926b76d3f6667025535d0859eed61c3/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_19:10:46_07998ef39926b76d3f6667025535d0859eed61c3/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-29_19:10:46_07998ef39926b76d3f6667025535d0859eed61c3/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-29_19:10:46_07998ef39926b76d3f6667025535d0859eed61c3/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_19:10:46_07998ef39926b76d3f6667025535d0859eed61c3/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_19:10:46_07998ef39926b76d3f6667025535d0859eed61c3/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-29_19:10:46_07998ef39926b76d3f6667025535d0859eed61c3/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 88567377c08c23e649b18dfa65ecd0ceef7cdec0..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_19:10:46_07998ef39926b76d3f6667025535d0859eed61c3/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.91884799999997,0.00342,1170.0 diff --git a/raw_results/2023-08-29_19:10:46_07998ef39926b76d3f6667025535d0859eed61c3/pytorch_bert_inference/1/main.log b/raw_results/2023-08-29_19:10:46_07998ef39926b76d3f6667025535d0859eed61c3/pytorch_bert_inference/1/main.log deleted file mode 100644 index 2efbd3ed96149109714e99ce5429976f458fb84f..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_19:10:46_07998ef39926b76d3f6667025535d0859eed61c3/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-29 20:51:53,466][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 20:51:53,468][benchmark][INFO] - + Setting seed(42) -[2023-08-29 20:51:53,946][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-29 20:51:53,947][backend][INFO] - Configuring pytorch backend -[2023-08-29 20:51:53,947][backend][INFO] - + Checking initial device isolation -[2023-08-29 20:51:53,947][backend][INFO] - + Checking contineous device isolation -[2023-08-29 20:51:53,947][pytorch][INFO] - + Disabling gradients -[2023-08-29 20:51:53,948][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 20:51:54,077][pytorch][INFO] - + Turning on eval mode -[2023-08-29 20:51:54,078][inference][INFO] - Running inference benchmark -[2023-08-29 20:51:54,200][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 20:51:54,201][inference][INFO] - + Tracking forward 
pass peak memory -[2023-08-29 20:51:54,249][inference][INFO] - + Forward pass peak memory: 467.91884799999997 (MB) -[2023-08-29 20:51:54,250][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-29 20:51:54,252][inference][INFO] - + Warming up the forward pass -[2023-08-29 20:51:54,287][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 20:51:59,334][inference][INFO] - + Forward pass latency: 3.42e-03 (s) -[2023-08-29 20:51:59,335][inference][INFO] - + Forward pass throughput: 1170.00 (samples/s) -[2023-08-29 20:51:59,335][inference][INFO] - Saving inference results -[2023-08-29 20:51:59,345][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-29_19:10:46_07998ef39926b76d3f6667025535d0859eed61c3/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-29_19:10:46_07998ef39926b76d3f6667025535d0859eed61c3/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_19:10:46_07998ef39926b76d3f6667025535d0859eed61c3/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_19:10:46_07998ef39926b76d3f6667025535d0859eed61c3/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-29_19:10:46_07998ef39926b76d3f6667025535d0859eed61c3/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 3f6a462134b7f1eeaf2c9ef1182da822834018fa..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_19:10:46_07998ef39926b76d3f6667025535d0859eed61c3/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - 
launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-29_19:10:46_07998ef39926b76d3f6667025535d0859eed61c3/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-29_19:10:46_07998ef39926b76d3f6667025535d0859eed61c3/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-29_19:10:46_07998ef39926b76d3f6667025535d0859eed61c3/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_19:10:46_07998ef39926b76d3f6667025535d0859eed61c3/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-29_19:10:46_07998ef39926b76d3f6667025535d0859eed61c3/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-29_19:10:46_07998ef39926b76d3f6667025535d0859eed61c3/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_19:10:46_07998ef39926b76d3f6667025535d0859eed61c3/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-29_19:10:46_07998ef39926b76d3f6667025535d0859eed61c3/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-29_19:10:46_07998ef39926b76d3f6667025535d0859eed61c3/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index cf4229a0533dc40d35e00d0eda0900ec30fe3b03..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-29_19:10:46_07998ef39926b76d3f6667025535d0859eed61c3/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.532672,0.0034,294.0,0.481,208.0 diff --git a/raw_results/2023-08-29_19:10:46_07998ef39926b76d3f6667025535d0859eed61c3/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-29_19:10:46_07998ef39926b76d3f6667025535d0859eed61c3/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 473d99d243aeed45a2480f64b72aad02341b5ea2..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-29_19:10:46_07998ef39926b76d3f6667025535d0859eed61c3/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-29 20:52:04,136][benchmark][INFO] - Configuring inference benchmark -[2023-08-29 20:52:04,137][benchmark][INFO] - + Setting seed(42) -[2023-08-29 20:52:05,885][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-29 20:52:05,885][backend][INFO] - Configuring pytorch backend -[2023-08-29 20:52:05,885][backend][INFO] - + Checking initial device isolation -[2023-08-29 20:52:05,886][backend][INFO] - + Checking contineous device isolation -[2023-08-29 20:52:05,886][pytorch][INFO] - + Disabling gradients -[2023-08-29 20:52:05,886][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-29 20:52:06,540][pytorch][INFO] - + Turning on eval mode -[2023-08-29 20:52:06,540][inference][INFO] - Running inference benchmark -[2023-08-29 20:52:06,777][inference][INFO] - + Tracking forward pass peak memory -[2023-08-29 20:52:06,828][inference][INFO] - + Forward pass peak memory: 469.532672 (MB) -[2023-08-29 20:52:06,829][inference][INFO] - + Warming up the forward pass -[2023-08-29 20:52:06,863][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-29 20:52:11,913][inference][INFO] - + Forward pass latency: 3.40e-03 (s) -[2023-08-29 20:52:11,915][inference][INFO] - + Forward pass throughput: 294.00 (samples/s) -[2023-08-29 20:52:11,915][inference][INFO] - + Warming up the generation pass -[2023-08-29 20:52:12,454][inference][INFO] - + Tracking generation latency and throughput -[2023-08-29 20:52:17,742][inference][INFO] - + Generation pass latency: 4.81e-01 (s) -[2023-08-29 20:52:17,743][inference][INFO] - + Generation pass throughput: 208.00 (tokens/s) -[2023-08-29 20:52:17,743][inference][INFO] - Saving inference results -[2023-08-29 20:52:17,758][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-30_09:52:41_1bf2f36daf6731f001ea88ae53ba96acfb6c8497/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-30_09:52:41_1bf2f36daf6731f001ea88ae53ba96acfb6c8497/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_09:52:41_1bf2f36daf6731f001ea88ae53ba96acfb6c8497/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-30_09:52:41_1bf2f36daf6731f001ea88ae53ba96acfb6c8497/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-30_09:52:41_1bf2f36daf6731f001ea88ae53ba96acfb6c8497/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index de5f030f80080c7177ca35e29e81312712326476..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_09:52:41_1bf2f36daf6731f001ea88ae53ba96acfb6c8497/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-30_09:52:41_1bf2f36daf6731f001ea88ae53ba96acfb6c8497/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-30_09:52:41_1bf2f36daf6731f001ea88ae53ba96acfb6c8497/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-30_09:52:41_1bf2f36daf6731f001ea88ae53ba96acfb6c8497/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_09:52:41_1bf2f36daf6731f001ea88ae53ba96acfb6c8497/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-30_09:52:41_1bf2f36daf6731f001ea88ae53ba96acfb6c8497/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-30_09:52:41_1bf2f36daf6731f001ea88ae53ba96acfb6c8497/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_09:52:41_1bf2f36daf6731f001ea88ae53ba96acfb6c8497/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-30_09:52:41_1bf2f36daf6731f001ea88ae53ba96acfb6c8497/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-30_09:52:41_1bf2f36daf6731f001ea88ae53ba96acfb6c8497/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index eb82b8aeed51c27c98ade4c53c2a9d74471d1b6b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_09:52:41_1bf2f36daf6731f001ea88ae53ba96acfb6c8497/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.857984,0.00306,327.0 diff --git a/raw_results/2023-08-30_09:52:41_1bf2f36daf6731f001ea88ae53ba96acfb6c8497/pytorch_bert_inference/0/main.log b/raw_results/2023-08-30_09:52:41_1bf2f36daf6731f001ea88ae53ba96acfb6c8497/pytorch_bert_inference/0/main.log deleted file mode 100644 index 660fe8b329acffd83ee81f67e0570afa7cf32e42..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_09:52:41_1bf2f36daf6731f001ea88ae53ba96acfb6c8497/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-30 10:50:01,145][benchmark][INFO] - Configuring inference benchmark -[2023-08-30 10:50:01,146][benchmark][INFO] - + Setting seed(42) -[2023-08-30 10:50:02,385][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-30 10:50:02,385][backend][INFO] - Configuring pytorch backend -[2023-08-30 10:50:02,385][backend][INFO] - + Checking initial device isolation -[2023-08-30 10:50:02,386][backend][INFO] - + Checking contineous device isolation -[2023-08-30 10:50:02,386][pytorch][INFO] - + Disabling gradients -[2023-08-30 10:50:02,386][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-30 10:50:03,039][pytorch][INFO] - + Turning on eval mode -[2023-08-30 10:50:03,040][inference][INFO] - Running inference benchmark -[2023-08-30 10:50:03,158][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-30 10:50:03,159][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-30 10:50:03,221][inference][INFO] - + Forward pass peak memory: 466.857984 (MB) -[2023-08-30 10:50:03,222][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-30 10:50:03,224][inference][INFO] - + Warming up the forward pass -[2023-08-30 10:50:03,260][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-30 10:50:08,313][inference][INFO] - + Forward pass latency: 3.06e-03 (s) -[2023-08-30 10:50:08,314][inference][INFO] - + Forward pass throughput: 327.00 (samples/s) -[2023-08-30 10:50:08,314][inference][INFO] - Saving inference results -[2023-08-30 10:50:08,327][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-30_09:52:41_1bf2f36daf6731f001ea88ae53ba96acfb6c8497/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-30_09:52:41_1bf2f36daf6731f001ea88ae53ba96acfb6c8497/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_09:52:41_1bf2f36daf6731f001ea88ae53ba96acfb6c8497/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-30_09:52:41_1bf2f36daf6731f001ea88ae53ba96acfb6c8497/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-30_09:52:41_1bf2f36daf6731f001ea88ae53ba96acfb6c8497/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 06d7c6a7bf7a10392174abdb07a4ca6bc7218589..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_09:52:41_1bf2f36daf6731f001ea88ae53ba96acfb6c8497/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-30_09:52:41_1bf2f36daf6731f001ea88ae53ba96acfb6c8497/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-30_09:52:41_1bf2f36daf6731f001ea88ae53ba96acfb6c8497/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-08-30_09:52:41_1bf2f36daf6731f001ea88ae53ba96acfb6c8497/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_09:52:41_1bf2f36daf6731f001ea88ae53ba96acfb6c8497/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-30_09:52:41_1bf2f36daf6731f001ea88ae53ba96acfb6c8497/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-30_09:52:41_1bf2f36daf6731f001ea88ae53ba96acfb6c8497/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_09:52:41_1bf2f36daf6731f001ea88ae53ba96acfb6c8497/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-30_09:52:41_1bf2f36daf6731f001ea88ae53ba96acfb6c8497/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-30_09:52:41_1bf2f36daf6731f001ea88ae53ba96acfb6c8497/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 4267efc9874c5f82453267e4e32ba2b541c03c3b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_09:52:41_1bf2f36daf6731f001ea88ae53ba96acfb6c8497/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.869696,0.00377,1060.0 diff --git a/raw_results/2023-08-30_09:52:41_1bf2f36daf6731f001ea88ae53ba96acfb6c8497/pytorch_bert_inference/1/main.log b/raw_results/2023-08-30_09:52:41_1bf2f36daf6731f001ea88ae53ba96acfb6c8497/pytorch_bert_inference/1/main.log deleted file mode 100644 index e854cc1697ce210b6d5fa634dc6530d0ba5deb2d..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-30_09:52:41_1bf2f36daf6731f001ea88ae53ba96acfb6c8497/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-30 10:50:08,699][benchmark][INFO] - Configuring inference benchmark -[2023-08-30 10:50:08,700][benchmark][INFO] - + Setting seed(42) -[2023-08-30 10:50:09,253][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-30 10:50:09,253][backend][INFO] - Configuring pytorch backend -[2023-08-30 10:50:09,254][backend][INFO] - + Checking initial device isolation -[2023-08-30 10:50:09,254][backend][INFO] - + Checking contineous device isolation -[2023-08-30 10:50:09,254][pytorch][INFO] - + Disabling gradients -[2023-08-30 10:50:09,254][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-30 10:50:09,369][pytorch][INFO] - + Turning on eval mode -[2023-08-30 10:50:09,370][inference][INFO] - Running inference benchmark -[2023-08-30 10:50:09,531][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-30 10:50:09,532][inference][INFO] - + Tracking forward pass peak memory -[2023-08-30 10:50:09,575][inference][INFO] - + Forward pass peak memory: 467.869696 (MB) -[2023-08-30 10:50:09,576][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-30 10:50:09,578][inference][INFO] - + Warming up the forward pass -[2023-08-30 10:50:09,619][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-30 10:50:14,662][inference][INFO] - + Forward pass latency: 3.77e-03 (s) -[2023-08-30 10:50:14,664][inference][INFO] - + Forward pass throughput: 1060.00 (samples/s) -[2023-08-30 10:50:14,664][inference][INFO] - Saving inference results -[2023-08-30 10:50:14,672][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-30_09:52:41_1bf2f36daf6731f001ea88ae53ba96acfb6c8497/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-30_09:52:41_1bf2f36daf6731f001ea88ae53ba96acfb6c8497/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_09:52:41_1bf2f36daf6731f001ea88ae53ba96acfb6c8497/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: 
hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-30_09:52:41_1bf2f36daf6731f001ea88ae53ba96acfb6c8497/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-30_09:52:41_1bf2f36daf6731f001ea88ae53ba96acfb6c8497/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 8b77b4a383eddd28b404659b09bee98f736fc396..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_09:52:41_1bf2f36daf6731f001ea88ae53ba96acfb6c8497/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-30_09:52:41_1bf2f36daf6731f001ea88ae53ba96acfb6c8497/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-30_09:52:41_1bf2f36daf6731f001ea88ae53ba96acfb6c8497/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-30_09:52:41_1bf2f36daf6731f001ea88ae53ba96acfb6c8497/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_09:52:41_1bf2f36daf6731f001ea88ae53ba96acfb6c8497/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-30_09:52:41_1bf2f36daf6731f001ea88ae53ba96acfb6c8497/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-30_09:52:41_1bf2f36daf6731f001ea88ae53ba96acfb6c8497/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_09:52:41_1bf2f36daf6731f001ea88ae53ba96acfb6c8497/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-30_09:52:41_1bf2f36daf6731f001ea88ae53ba96acfb6c8497/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-30_09:52:41_1bf2f36daf6731f001ea88ae53ba96acfb6c8497/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 85f4b4a9357ad2b9c4b189f12ab458bb659d1813..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_09:52:41_1bf2f36daf6731f001ea88ae53ba96acfb6c8497/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.64736,0.00311,322.0,0.488,205.0 diff --git a/raw_results/2023-08-30_09:52:41_1bf2f36daf6731f001ea88ae53ba96acfb6c8497/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-30_09:52:41_1bf2f36daf6731f001ea88ae53ba96acfb6c8497/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 8e222f7dcd34ee6984464661a39248b0eca4fbd3..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_09:52:41_1bf2f36daf6731f001ea88ae53ba96acfb6c8497/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-30 10:50:19,581][benchmark][INFO] - Configuring inference benchmark -[2023-08-30 10:50:19,583][benchmark][INFO] - + Setting seed(42) -[2023-08-30 10:50:21,032][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-30 10:50:21,032][backend][INFO] - Configuring pytorch backend -[2023-08-30 10:50:21,032][backend][INFO] - + Checking initial device isolation -[2023-08-30 10:50:21,032][backend][INFO] - + Checking contineous device isolation -[2023-08-30 10:50:21,033][pytorch][INFO] - + Disabling gradients -[2023-08-30 10:50:21,033][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-30 10:50:21,708][pytorch][INFO] - + Turning on eval mode -[2023-08-30 10:50:21,709][inference][INFO] - Running inference benchmark -[2023-08-30 10:50:22,021][inference][INFO] - + Tracking forward pass peak memory -[2023-08-30 10:50:22,075][inference][INFO] - + Forward pass peak memory: 469.64736 (MB) -[2023-08-30 10:50:22,077][inference][INFO] - + Warming up the forward pass 
-[2023-08-30 10:50:22,114][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-30 10:50:27,165][inference][INFO] - + Forward pass latency: 3.11e-03 (s) -[2023-08-30 10:50:27,167][inference][INFO] - + Forward pass throughput: 322.00 (samples/s) -[2023-08-30 10:50:27,168][inference][INFO] - + Warming up the generation pass -[2023-08-30 10:50:27,661][inference][INFO] - + Tracking generation latency and throughput -[2023-08-30 10:50:33,025][inference][INFO] - + Generation pass latency: 4.88e-01 (s) -[2023-08-30 10:50:33,026][inference][INFO] - + Generation pass throughput: 205.00 (tokens/s) -[2023-08-30 10:50:33,026][inference][INFO] - Saving inference results -[2023-08-30 10:50:33,039][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-30_10:56:05_52574026b6740a3882d6dd1cbf1e1663d4cea27b/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-30_10:56:05_52574026b6740a3882d6dd1cbf1e1663d4cea27b/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_10:56:05_52574026b6740a3882d6dd1cbf1e1663d4cea27b/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-30_10:56:05_52574026b6740a3882d6dd1cbf1e1663d4cea27b/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-30_10:56:05_52574026b6740a3882d6dd1cbf1e1663d4cea27b/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 11ce92f985c6ae7e2bcd4599b0e66b86c4a42d29..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_10:56:05_52574026b6740a3882d6dd1cbf1e1663d4cea27b/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-30_10:56:05_52574026b6740a3882d6dd1cbf1e1663d4cea27b/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-30_10:56:05_52574026b6740a3882d6dd1cbf1e1663d4cea27b/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-30_10:56:05_52574026b6740a3882d6dd1cbf1e1663d4cea27b/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_10:56:05_52574026b6740a3882d6dd1cbf1e1663d4cea27b/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-30_10:56:05_52574026b6740a3882d6dd1cbf1e1663d4cea27b/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-30_10:56:05_52574026b6740a3882d6dd1cbf1e1663d4cea27b/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_10:56:05_52574026b6740a3882d6dd1cbf1e1663d4cea27b/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-30_10:56:05_52574026b6740a3882d6dd1cbf1e1663d4cea27b/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-30_10:56:05_52574026b6740a3882d6dd1cbf1e1663d4cea27b/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 825e717420732aaaf9e0606b175ce747be7b4377..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_10:56:05_52574026b6740a3882d6dd1cbf1e1663d4cea27b/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.587648,0.00315,317.0 diff --git a/raw_results/2023-08-30_10:56:05_52574026b6740a3882d6dd1cbf1e1663d4cea27b/pytorch_bert_inference/0/main.log b/raw_results/2023-08-30_10:56:05_52574026b6740a3882d6dd1cbf1e1663d4cea27b/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
e27bfe9e2b5eea1919c4f8b6b9b5bb3902f72f16..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_10:56:05_52574026b6740a3882d6dd1cbf1e1663d4cea27b/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-30 12:58:44,837][benchmark][INFO] - Configuring inference benchmark -[2023-08-30 12:58:44,838][benchmark][INFO] - + Setting seed(42) -[2023-08-30 12:58:46,061][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-30 12:58:46,062][backend][INFO] - Configuring pytorch backend -[2023-08-30 12:58:46,062][backend][INFO] - + Checking initial device isolation -[2023-08-30 12:58:46,062][backend][INFO] - + Checking contineous device isolation -[2023-08-30 12:58:46,062][pytorch][INFO] - + Disabling gradients -[2023-08-30 12:58:46,063][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-30 12:58:46,737][pytorch][INFO] - + Turning on eval mode -[2023-08-30 12:58:46,738][inference][INFO] - Running inference benchmark -[2023-08-30 12:58:46,857][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-30 12:58:46,858][inference][INFO] - + Tracking forward pass peak memory -[2023-08-30 12:58:46,922][inference][INFO] - + Forward pass peak memory: 466.587648 (MB) -[2023-08-30 12:58:46,923][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-30 12:58:46,925][inference][INFO] - + Warming up the forward pass -[2023-08-30 12:58:46,962][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-30 12:58:52,016][inference][INFO] - + Forward pass latency: 3.15e-03 (s) -[2023-08-30 12:58:52,019][inference][INFO] - + Forward pass throughput: 317.00 (samples/s) -[2023-08-30 12:58:52,019][inference][INFO] - Saving inference results -[2023-08-30 12:58:52,032][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-30_10:56:05_52574026b6740a3882d6dd1cbf1e1663d4cea27b/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-30_10:56:05_52574026b6740a3882d6dd1cbf1e1663d4cea27b/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_10:56:05_52574026b6740a3882d6dd1cbf1e1663d4cea27b/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-30_10:56:05_52574026b6740a3882d6dd1cbf1e1663d4cea27b/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-30_10:56:05_52574026b6740a3882d6dd1cbf1e1663d4cea27b/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index bf9398cfbb84097620812dabb638fb24b09da7c6..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_10:56:05_52574026b6740a3882d6dd1cbf1e1663d4cea27b/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-30_10:56:05_52574026b6740a3882d6dd1cbf1e1663d4cea27b/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-30_10:56:05_52574026b6740a3882d6dd1cbf1e1663d4cea27b/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-30_10:56:05_52574026b6740a3882d6dd1cbf1e1663d4cea27b/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_10:56:05_52574026b6740a3882d6dd1cbf1e1663d4cea27b/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-30_10:56:05_52574026b6740a3882d6dd1cbf1e1663d4cea27b/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-30_10:56:05_52574026b6740a3882d6dd1cbf1e1663d4cea27b/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_10:56:05_52574026b6740a3882d6dd1cbf1e1663d4cea27b/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-30_10:56:05_52574026b6740a3882d6dd1cbf1e1663d4cea27b/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-30_10:56:05_52574026b6740a3882d6dd1cbf1e1663d4cea27b/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 375c54cdd564b9e6f5917ef07788c08c251c2dee..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_10:56:05_52574026b6740a3882d6dd1cbf1e1663d4cea27b/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.62803199999996,0.00347,1150.0 diff --git a/raw_results/2023-08-30_10:56:05_52574026b6740a3882d6dd1cbf1e1663d4cea27b/pytorch_bert_inference/1/main.log b/raw_results/2023-08-30_10:56:05_52574026b6740a3882d6dd1cbf1e1663d4cea27b/pytorch_bert_inference/1/main.log deleted file mode 100644 index 2ac656b306069c7dcb45d0b9d9ef381be7cd5bae..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_10:56:05_52574026b6740a3882d6dd1cbf1e1663d4cea27b/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-30 12:58:52,417][benchmark][INFO] - Configuring inference benchmark -[2023-08-30 12:58:52,418][benchmark][INFO] - + Setting seed(42) -[2023-08-30 12:58:52,887][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-30 12:58:52,887][backend][INFO] - Configuring pytorch backend -[2023-08-30 12:58:52,888][backend][INFO] - + Checking initial device isolation -[2023-08-30 12:58:52,888][backend][INFO] - + Checking contineous device isolation -[2023-08-30 12:58:52,888][pytorch][INFO] - + Disabling gradients -[2023-08-30 12:58:52,888][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-30 12:58:53,004][pytorch][INFO] - + Turning on eval mode -[2023-08-30 12:58:53,004][inference][INFO] - Running inference benchmark -[2023-08-30 12:58:53,126][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-30 12:58:53,127][inference][INFO] - + Tracking forward 
pass peak memory -[2023-08-30 12:58:53,170][inference][INFO] - + Forward pass peak memory: 467.62803199999996 (MB) -[2023-08-30 12:58:53,171][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-30 12:58:53,173][inference][INFO] - + Warming up the forward pass -[2023-08-30 12:58:53,210][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-30 12:58:58,256][inference][INFO] - + Forward pass latency: 3.47e-03 (s) -[2023-08-30 12:58:58,257][inference][INFO] - + Forward pass throughput: 1150.00 (samples/s) -[2023-08-30 12:58:58,257][inference][INFO] - Saving inference results -[2023-08-30 12:58:58,266][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-30_10:56:05_52574026b6740a3882d6dd1cbf1e1663d4cea27b/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-30_10:56:05_52574026b6740a3882d6dd1cbf1e1663d4cea27b/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_10:56:05_52574026b6740a3882d6dd1cbf1e1663d4cea27b/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-30_10:56:05_52574026b6740a3882d6dd1cbf1e1663d4cea27b/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-30_10:56:05_52574026b6740a3882d6dd1cbf1e1663d4cea27b/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index eba403348585a8999815eb56dbd3ba120d7f9fb6..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_10:56:05_52574026b6740a3882d6dd1cbf1e1663d4cea27b/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - 
launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-30_10:56:05_52574026b6740a3882d6dd1cbf1e1663d4cea27b/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-30_10:56:05_52574026b6740a3882d6dd1cbf1e1663d4cea27b/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-30_10:56:05_52574026b6740a3882d6dd1cbf1e1663d4cea27b/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_10:56:05_52574026b6740a3882d6dd1cbf1e1663d4cea27b/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-30_10:56:05_52574026b6740a3882d6dd1cbf1e1663d4cea27b/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-30_10:56:05_52574026b6740a3882d6dd1cbf1e1663d4cea27b/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_10:56:05_52574026b6740a3882d6dd1cbf1e1663d4cea27b/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-30_10:56:05_52574026b6740a3882d6dd1cbf1e1663d4cea27b/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-30_10:56:05_52574026b6740a3882d6dd1cbf1e1663d4cea27b/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index e54fa161257fbb8d3a5c6f34303c008b8e9cd737..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_10:56:05_52574026b6740a3882d6dd1cbf1e1663d4cea27b/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.291008,0.00383,261.0,0.492,203.0 diff --git a/raw_results/2023-08-30_10:56:05_52574026b6740a3882d6dd1cbf1e1663d4cea27b/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-30_10:56:05_52574026b6740a3882d6dd1cbf1e1663d4cea27b/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 0310bc9e88a6c53a0a3e20275ea84f70f5836d9e..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-30_10:56:05_52574026b6740a3882d6dd1cbf1e1663d4cea27b/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-30 12:59:03,076][benchmark][INFO] - Configuring inference benchmark -[2023-08-30 12:59:03,077][benchmark][INFO] - + Setting seed(42) -[2023-08-30 12:59:04,758][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-30 12:59:04,759][backend][INFO] - Configuring pytorch backend -[2023-08-30 12:59:04,759][backend][INFO] - + Checking initial device isolation -[2023-08-30 12:59:04,759][backend][INFO] - + Checking contineous device isolation -[2023-08-30 12:59:04,759][pytorch][INFO] - + Disabling gradients -[2023-08-30 12:59:04,759][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-30 12:59:05,439][pytorch][INFO] - + Turning on eval mode -[2023-08-30 12:59:05,439][inference][INFO] - Running inference benchmark -[2023-08-30 12:59:05,637][inference][INFO] - + Tracking forward pass peak memory -[2023-08-30 12:59:05,683][inference][INFO] - + Forward pass peak memory: 469.291008 (MB) -[2023-08-30 12:59:05,684][inference][INFO] - + Warming up the forward pass -[2023-08-30 12:59:05,716][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-30 12:59:10,762][inference][INFO] - + Forward pass latency: 3.83e-03 (s) -[2023-08-30 12:59:10,764][inference][INFO] - + Forward pass throughput: 261.00 (samples/s) -[2023-08-30 12:59:10,765][inference][INFO] - + Warming up the generation pass -[2023-08-30 12:59:11,342][inference][INFO] - + Tracking generation latency and throughput -[2023-08-30 12:59:16,761][inference][INFO] - + Generation pass latency: 4.92e-01 (s) -[2023-08-30 12:59:16,763][inference][INFO] - + Generation pass throughput: 203.00 (tokens/s) -[2023-08-30 12:59:16,763][inference][INFO] - Saving inference results -[2023-08-30 12:59:16,775][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-30_13:13:50_62399d6f3568d1436e3e0364a32d13e32bb78cb6/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-30_13:13:50_62399d6f3568d1436e3e0364a32d13e32bb78cb6/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_13:13:50_62399d6f3568d1436e3e0364a32d13e32bb78cb6/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-30_13:13:50_62399d6f3568d1436e3e0364a32d13e32bb78cb6/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-30_13:13:50_62399d6f3568d1436e3e0364a32d13e32bb78cb6/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 46443ba326481423a66afa88e91d91987c51b082..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_13:13:50_62399d6f3568d1436e3e0364a32d13e32bb78cb6/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-30_13:13:50_62399d6f3568d1436e3e0364a32d13e32bb78cb6/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-30_13:13:50_62399d6f3568d1436e3e0364a32d13e32bb78cb6/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-30_13:13:50_62399d6f3568d1436e3e0364a32d13e32bb78cb6/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_13:13:50_62399d6f3568d1436e3e0364a32d13e32bb78cb6/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-30_13:13:50_62399d6f3568d1436e3e0364a32d13e32bb78cb6/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-30_13:13:50_62399d6f3568d1436e3e0364a32d13e32bb78cb6/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_13:13:50_62399d6f3568d1436e3e0364a32d13e32bb78cb6/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-30_13:13:50_62399d6f3568d1436e3e0364a32d13e32bb78cb6/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-30_13:13:50_62399d6f3568d1436e3e0364a32d13e32bb78cb6/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index c1af921da2865b683679aa12bfbb7b54f5f4477d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_13:13:50_62399d6f3568d1436e3e0364a32d13e32bb78cb6/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.243008,0.00313,319.0 diff --git a/raw_results/2023-08-30_13:13:50_62399d6f3568d1436e3e0364a32d13e32bb78cb6/pytorch_bert_inference/0/main.log b/raw_results/2023-08-30_13:13:50_62399d6f3568d1436e3e0364a32d13e32bb78cb6/pytorch_bert_inference/0/main.log deleted file mode 100644 index 1ac269a6de093892b93afb5717562a47c8600983..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_13:13:50_62399d6f3568d1436e3e0364a32d13e32bb78cb6/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-30 14:49:53,713][benchmark][INFO] - Configuring inference benchmark -[2023-08-30 14:49:53,714][benchmark][INFO] - + Setting seed(42) -[2023-08-30 14:49:55,321][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-30 14:49:55,322][backend][INFO] - Configuring pytorch backend -[2023-08-30 14:49:55,322][backend][INFO] - + Checking initial device isolation -[2023-08-30 14:49:55,322][backend][INFO] - + Checking contineous device isolation -[2023-08-30 14:49:55,322][pytorch][INFO] - + Disabling gradients -[2023-08-30 14:49:55,322][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-30 14:49:55,928][pytorch][INFO] - + Turning on eval mode -[2023-08-30 14:49:55,929][inference][INFO] - Running inference benchmark -[2023-08-30 14:49:56,059][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-30 14:49:56,060][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-30 14:49:56,119][inference][INFO] - + Forward pass peak memory: 467.243008 (MB) -[2023-08-30 14:49:56,121][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-30 14:49:56,122][inference][INFO] - + Warming up the forward pass -[2023-08-30 14:49:56,159][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-30 14:50:01,211][inference][INFO] - + Forward pass latency: 3.13e-03 (s) -[2023-08-30 14:50:01,213][inference][INFO] - + Forward pass throughput: 319.00 (samples/s) -[2023-08-30 14:50:01,213][inference][INFO] - Saving inference results -[2023-08-30 14:50:01,224][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-30_13:13:50_62399d6f3568d1436e3e0364a32d13e32bb78cb6/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-30_13:13:50_62399d6f3568d1436e3e0364a32d13e32bb78cb6/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_13:13:50_62399d6f3568d1436e3e0364a32d13e32bb78cb6/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-30_13:13:50_62399d6f3568d1436e3e0364a32d13e32bb78cb6/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-30_13:13:50_62399d6f3568d1436e3e0364a32d13e32bb78cb6/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index f566932796f406c61046b57a05db362193c4eeb4..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_13:13:50_62399d6f3568d1436e3e0364a32d13e32bb78cb6/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-30_13:13:50_62399d6f3568d1436e3e0364a32d13e32bb78cb6/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-30_13:13:50_62399d6f3568d1436e3e0364a32d13e32bb78cb6/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-08-30_13:13:50_62399d6f3568d1436e3e0364a32d13e32bb78cb6/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_13:13:50_62399d6f3568d1436e3e0364a32d13e32bb78cb6/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-30_13:13:50_62399d6f3568d1436e3e0364a32d13e32bb78cb6/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-30_13:13:50_62399d6f3568d1436e3e0364a32d13e32bb78cb6/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_13:13:50_62399d6f3568d1436e3e0364a32d13e32bb78cb6/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-30_13:13:50_62399d6f3568d1436e3e0364a32d13e32bb78cb6/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-30_13:13:50_62399d6f3568d1436e3e0364a32d13e32bb78cb6/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 15b3f0e8c695fb4e175965fffef53b55d7b2f276..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_13:13:50_62399d6f3568d1436e3e0364a32d13e32bb78cb6/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.29568,0.00348,1150.0 diff --git a/raw_results/2023-08-30_13:13:50_62399d6f3568d1436e3e0364a32d13e32bb78cb6/pytorch_bert_inference/1/main.log b/raw_results/2023-08-30_13:13:50_62399d6f3568d1436e3e0364a32d13e32bb78cb6/pytorch_bert_inference/1/main.log deleted file mode 100644 index 4f95fa0956f6ebd0e0d7a4c70d4f398dc3425eeb..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-30_13:13:50_62399d6f3568d1436e3e0364a32d13e32bb78cb6/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-30 14:50:01,608][benchmark][INFO] - Configuring inference benchmark -[2023-08-30 14:50:01,610][benchmark][INFO] - + Setting seed(42) -[2023-08-30 14:50:02,043][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-30 14:50:02,044][backend][INFO] - Configuring pytorch backend -[2023-08-30 14:50:02,044][backend][INFO] - + Checking initial device isolation -[2023-08-30 14:50:02,044][backend][INFO] - + Checking continuous device isolation -[2023-08-30 14:50:02,044][pytorch][INFO] - + Disabling gradients -[2023-08-30 14:50:02,044][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-30 14:50:02,159][pytorch][INFO] - + Turning on eval mode -[2023-08-30 14:50:02,159][inference][INFO] - Running inference benchmark -[2023-08-30 14:50:02,282][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-30 14:50:02,283][inference][INFO] - + Tracking forward pass peak memory -[2023-08-30 14:50:02,327][inference][INFO] - + Forward pass peak memory: 468.29568 (MB) -[2023-08-30 14:50:02,328][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-30 14:50:02,329][inference][INFO] - + Warming up the forward pass -[2023-08-30 14:50:02,365][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-30 14:50:07,412][inference][INFO] - + Forward pass latency: 3.48e-03 (s) -[2023-08-30 14:50:07,413][inference][INFO] - + Forward pass throughput: 1150.00 (samples/s) -[2023-08-30 14:50:07,413][inference][INFO] - Saving inference results -[2023-08-30 14:50:07,423][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-30_13:13:50_62399d6f3568d1436e3e0364a32d13e32bb78cb6/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-30_13:13:50_62399d6f3568d1436e3e0364a32d13e32bb78cb6/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_13:13:50_62399d6f3568d1436e3e0364a32d13e32bb78cb6/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: 
hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-30_13:13:50_62399d6f3568d1436e3e0364a32d13e32bb78cb6/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-30_13:13:50_62399d6f3568d1436e3e0364a32d13e32bb78cb6/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 7b196a98131b91c8484cb59326edf5a8ad8210d4..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_13:13:50_62399d6f3568d1436e3e0364a32d13e32bb78cb6/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-30_13:13:50_62399d6f3568d1436e3e0364a32d13e32bb78cb6/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-30_13:13:50_62399d6f3568d1436e3e0364a32d13e32bb78cb6/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-30_13:13:50_62399d6f3568d1436e3e0364a32d13e32bb78cb6/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_13:13:50_62399d6f3568d1436e3e0364a32d13e32bb78cb6/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-30_13:13:50_62399d6f3568d1436e3e0364a32d13e32bb78cb6/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-30_13:13:50_62399d6f3568d1436e3e0364a32d13e32bb78cb6/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_13:13:50_62399d6f3568d1436e3e0364a32d13e32bb78cb6/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-30_13:13:50_62399d6f3568d1436e3e0364a32d13e32bb78cb6/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-30_13:13:50_62399d6f3568d1436e3e0364a32d13e32bb78cb6/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index a2c60fb07375f2790a9e69712c3e4a92b5fcf7af..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_13:13:50_62399d6f3568d1436e3e0364a32d13e32bb78cb6/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.061632,0.00386,259.0,0.516,194.0 diff --git a/raw_results/2023-08-30_13:13:50_62399d6f3568d1436e3e0364a32d13e32bb78cb6/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-30_13:13:50_62399d6f3568d1436e3e0364a32d13e32bb78cb6/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 4688040691830b3220b831e4ad198f124e2d37be..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_13:13:50_62399d6f3568d1436e3e0364a32d13e32bb78cb6/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-30 14:50:12,192][benchmark][INFO] - Configuring inference benchmark -[2023-08-30 14:50:12,194][benchmark][INFO] - + Setting seed(42) -[2023-08-30 14:50:13,666][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-30 14:50:13,666][backend][INFO] - Configuring pytorch backend -[2023-08-30 14:50:13,666][backend][INFO] - + Checking initial device isolation -[2023-08-30 14:50:13,666][backend][INFO] - + Checking continuous device isolation -[2023-08-30 14:50:13,667][pytorch][INFO] - + Disabling gradients -[2023-08-30 14:50:13,667][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-30 14:50:14,317][pytorch][INFO] - + Turning on eval mode -[2023-08-30 14:50:14,317][inference][INFO] - Running inference benchmark -[2023-08-30 14:50:14,523][inference][INFO] - + Tracking forward pass peak memory -[2023-08-30 14:50:14,572][inference][INFO] - + Forward pass peak memory: 469.061632 (MB) -[2023-08-30 14:50:14,573][inference][INFO] - + Warming up the forward pass
-[2023-08-30 14:50:14,607][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-30 14:50:19,652][inference][INFO] - + Forward pass latency: 3.86e-03 (s) -[2023-08-30 14:50:19,653][inference][INFO] - + Forward pass throughput: 259.00 (samples/s) -[2023-08-30 14:50:19,654][inference][INFO] - + Warming up the generation pass -[2023-08-30 14:50:20,215][inference][INFO] - + Tracking generation latency and throughput -[2023-08-30 14:50:25,379][inference][INFO] - + Generation pass latency: 5.16e-01 (s) -[2023-08-30 14:50:25,380][inference][INFO] - + Generation pass throughput: 194.00 (tokens/s) -[2023-08-30 14:50:25,380][inference][INFO] - Saving inference results -[2023-08-30 14:50:25,392][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-30_13:16:16_09dc99517f5f38ee210cf1145a7b17fc99b37dac/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-30_13:16:16_09dc99517f5f38ee210cf1145a7b17fc99b37dac/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_13:16:16_09dc99517f5f38ee210cf1145a7b17fc99b37dac/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-30_13:16:16_09dc99517f5f38ee210cf1145a7b17fc99b37dac/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-30_13:16:16_09dc99517f5f38ee210cf1145a7b17fc99b37dac/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index dab5dfff8d64047f15942f42fa1bb86b4dffa63f..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_13:16:16_09dc99517f5f38ee210cf1145a7b17fc99b37dac/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-30_13:16:16_09dc99517f5f38ee210cf1145a7b17fc99b37dac/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-30_13:16:16_09dc99517f5f38ee210cf1145a7b17fc99b37dac/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-30_13:16:16_09dc99517f5f38ee210cf1145a7b17fc99b37dac/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_13:16:16_09dc99517f5f38ee210cf1145a7b17fc99b37dac/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-30_13:16:16_09dc99517f5f38ee210cf1145a7b17fc99b37dac/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-30_13:16:16_09dc99517f5f38ee210cf1145a7b17fc99b37dac/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_13:16:16_09dc99517f5f38ee210cf1145a7b17fc99b37dac/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-30_13:16:16_09dc99517f5f38ee210cf1145a7b17fc99b37dac/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-30_13:16:16_09dc99517f5f38ee210cf1145a7b17fc99b37dac/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 8f5476c16f3a4f15a8f7abbe2fdf90d28a63a4d0..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_13:16:16_09dc99517f5f38ee210cf1145a7b17fc99b37dac/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.857984,0.00368,272.0 diff --git a/raw_results/2023-08-30_13:16:16_09dc99517f5f38ee210cf1145a7b17fc99b37dac/pytorch_bert_inference/0/main.log b/raw_results/2023-08-30_13:16:16_09dc99517f5f38ee210cf1145a7b17fc99b37dac/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
83d3a3b36dc821ab755235ab8f620d75460e3b90..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_13:16:16_09dc99517f5f38ee210cf1145a7b17fc99b37dac/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-30 14:51:37,318][benchmark][INFO] - Configuring inference benchmark -[2023-08-30 14:51:37,320][benchmark][INFO] - + Setting seed(42) -[2023-08-30 14:51:39,207][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-30 14:51:39,207][backend][INFO] - Configuring pytorch backend -[2023-08-30 14:51:39,207][backend][INFO] - + Checking initial device isolation -[2023-08-30 14:51:39,207][backend][INFO] - + Checking continuous device isolation -[2023-08-30 14:51:39,207][pytorch][INFO] - + Disabling gradients -[2023-08-30 14:51:39,208][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-30 14:51:39,860][pytorch][INFO] - + Turning on eval mode -[2023-08-30 14:51:39,861][inference][INFO] - Running inference benchmark -[2023-08-30 14:51:40,566][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-30 14:51:40,568][inference][INFO] - + Tracking forward pass peak memory -[2023-08-30 14:51:40,640][inference][INFO] - + Forward pass peak memory: 466.857984 (MB) -[2023-08-30 14:51:40,641][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-30 14:51:40,643][inference][INFO] - + Warming up the forward pass -[2023-08-30 14:51:40,675][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-30 14:51:45,720][inference][INFO] - + Forward pass latency: 3.68e-03 (s) -[2023-08-30 14:51:45,722][inference][INFO] - + Forward pass throughput: 272.00 (samples/s) -[2023-08-30 14:51:45,722][inference][INFO] - Saving inference results -[2023-08-30 14:51:45,732][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-30_13:16:16_09dc99517f5f38ee210cf1145a7b17fc99b37dac/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-30_13:16:16_09dc99517f5f38ee210cf1145a7b17fc99b37dac/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_13:16:16_09dc99517f5f38ee210cf1145a7b17fc99b37dac/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-30_13:16:16_09dc99517f5f38ee210cf1145a7b17fc99b37dac/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-30_13:16:16_09dc99517f5f38ee210cf1145a7b17fc99b37dac/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index debd4695b7acf633a5f558db167b8575d2dfec6a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_13:16:16_09dc99517f5f38ee210cf1145a7b17fc99b37dac/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-30_13:16:16_09dc99517f5f38ee210cf1145a7b17fc99b37dac/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-30_13:16:16_09dc99517f5f38ee210cf1145a7b17fc99b37dac/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-30_13:16:16_09dc99517f5f38ee210cf1145a7b17fc99b37dac/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_13:16:16_09dc99517f5f38ee210cf1145a7b17fc99b37dac/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-30_13:16:16_09dc99517f5f38ee210cf1145a7b17fc99b37dac/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-30_13:16:16_09dc99517f5f38ee210cf1145a7b17fc99b37dac/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_13:16:16_09dc99517f5f38ee210cf1145a7b17fc99b37dac/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-30_13:16:16_09dc99517f5f38ee210cf1145a7b17fc99b37dac/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-30_13:16:16_09dc99517f5f38ee210cf1145a7b17fc99b37dac/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index b3871db16602b6bc63b6e65728953a2cba7d9b6f..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_13:16:16_09dc99517f5f38ee210cf1145a7b17fc99b37dac/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.755008,0.00414,966.0 diff --git a/raw_results/2023-08-30_13:16:16_09dc99517f5f38ee210cf1145a7b17fc99b37dac/pytorch_bert_inference/1/main.log b/raw_results/2023-08-30_13:16:16_09dc99517f5f38ee210cf1145a7b17fc99b37dac/pytorch_bert_inference/1/main.log deleted file mode 100644 index c87093064ba090cccebfadc015baf02a269b2a9a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_13:16:16_09dc99517f5f38ee210cf1145a7b17fc99b37dac/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-30 14:51:46,099][benchmark][INFO] - Configuring inference benchmark -[2023-08-30 14:51:46,100][benchmark][INFO] - + Setting seed(42) -[2023-08-30 14:51:46,554][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-30 14:51:46,554][backend][INFO] - Configuring pytorch backend -[2023-08-30 14:51:46,554][backend][INFO] - + Checking initial device isolation -[2023-08-30 14:51:46,554][backend][INFO] - + Checking continuous device isolation -[2023-08-30 14:51:46,555][pytorch][INFO] - + Disabling gradients -[2023-08-30 14:51:46,555][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-30 14:51:46,672][pytorch][INFO] - + Turning on eval mode -[2023-08-30 14:51:46,673][inference][INFO] - Running inference benchmark -[2023-08-30 14:51:46,798][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-30 14:51:46,799][inference][INFO] - + Tracking forward pass peak
memory -[2023-08-30 14:51:46,843][inference][INFO] - + Forward pass peak memory: 467.755008 (MB) -[2023-08-30 14:51:46,844][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-30 14:51:46,845][inference][INFO] - + Warming up the forward pass -[2023-08-30 14:51:46,888][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-30 14:51:51,932][inference][INFO] - + Forward pass latency: 4.14e-03 (s) -[2023-08-30 14:51:51,933][inference][INFO] - + Forward pass throughput: 966.00 (samples/s) -[2023-08-30 14:51:51,933][inference][INFO] - Saving inference results -[2023-08-30 14:51:51,941][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-30_13:16:16_09dc99517f5f38ee210cf1145a7b17fc99b37dac/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-30_13:16:16_09dc99517f5f38ee210cf1145a7b17fc99b37dac/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_13:16:16_09dc99517f5f38ee210cf1145a7b17fc99b37dac/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-30_13:16:16_09dc99517f5f38ee210cf1145a7b17fc99b37dac/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-30_13:16:16_09dc99517f5f38ee210cf1145a7b17fc99b37dac/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index d2e95ac6f6859895cf913ad4a43addcb237acd6b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_13:16:16_09dc99517f5f38ee210cf1145a7b17fc99b37dac/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-30_13:16:16_09dc99517f5f38ee210cf1145a7b17fc99b37dac/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-30_13:16:16_09dc99517f5f38ee210cf1145a7b17fc99b37dac/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-30_13:16:16_09dc99517f5f38ee210cf1145a7b17fc99b37dac/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_13:16:16_09dc99517f5f38ee210cf1145a7b17fc99b37dac/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-30_13:16:16_09dc99517f5f38ee210cf1145a7b17fc99b37dac/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-30_13:16:16_09dc99517f5f38ee210cf1145a7b17fc99b37dac/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_13:16:16_09dc99517f5f38ee210cf1145a7b17fc99b37dac/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-30_13:16:16_09dc99517f5f38ee210cf1145a7b17fc99b37dac/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-30_13:16:16_09dc99517f5f38ee210cf1145a7b17fc99b37dac/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 5f2522a0302495db33f7a1ef726f8997678b1fdd..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_13:16:16_09dc99517f5f38ee210cf1145a7b17fc99b37dac/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.25824,0.0038,263.0,0.56,179.0 diff --git a/raw_results/2023-08-30_13:16:16_09dc99517f5f38ee210cf1145a7b17fc99b37dac/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-30_13:16:16_09dc99517f5f38ee210cf1145a7b17fc99b37dac/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index c214dda23dfd77f3c8cf5293175d32241880b44e..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-30_13:16:16_09dc99517f5f38ee210cf1145a7b17fc99b37dac/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-30 14:51:56,737][benchmark][INFO] - Configuring inference benchmark -[2023-08-30 14:51:56,738][benchmark][INFO] - + Setting seed(42) -[2023-08-30 14:51:58,358][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-30 14:51:58,358][backend][INFO] - Configuring pytorch backend -[2023-08-30 14:51:58,359][backend][INFO] - + Checking initial device isolation -[2023-08-30 14:51:58,359][backend][INFO] - + Checking continuous device isolation -[2023-08-30 14:51:58,359][pytorch][INFO] - + Disabling gradients -[2023-08-30 14:51:58,359][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-30 14:51:59,107][pytorch][INFO] - + Turning on eval mode -[2023-08-30 14:51:59,107][inference][INFO] - Running inference benchmark -[2023-08-30 14:51:59,301][inference][INFO] - + Tracking forward pass peak memory -[2023-08-30 14:51:59,348][inference][INFO] - + Forward pass peak memory: 469.25824 (MB) -[2023-08-30 14:51:59,349][inference][INFO] - + Warming up the forward pass -[2023-08-30 14:51:59,382][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-30 14:52:04,426][inference][INFO] - + Forward pass latency: 3.80e-03 (s) -[2023-08-30 14:52:04,428][inference][INFO] - + Forward pass throughput: 263.00 (samples/s) -[2023-08-30 14:52:04,429][inference][INFO] - + Warming up the generation pass -[2023-08-30 14:52:05,018][inference][INFO] - + Tracking generation latency and throughput -[2023-08-30 14:52:10,055][inference][INFO] - + Generation pass latency: 5.60e-01 (s) -[2023-08-30 14:52:10,056][inference][INFO] - + Generation pass throughput: 179.00 (tokens/s) -[2023-08-30 14:52:10,056][inference][INFO] - Saving inference results -[2023-08-30 14:52:10,069][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-30_13:40:39_ed290b083751590ba79e3a699608c8e9b70d5d9e/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-30_13:40:39_ed290b083751590ba79e3a699608c8e9b70d5d9e/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_13:40:39_ed290b083751590ba79e3a699608c8e9b70d5d9e/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 
16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-30_13:40:39_ed290b083751590ba79e3a699608c8e9b70d5d9e/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-30_13:40:39_ed290b083751590ba79e3a699608c8e9b70d5d9e/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 66d50d3ace5e9a9ab4bcb947ef938dc104c9156b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_13:40:39_ed290b083751590ba79e3a699608c8e9b70d5d9e/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-30_13:40:39_ed290b083751590ba79e3a699608c8e9b70d5d9e/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-30_13:40:39_ed290b083751590ba79e3a699608c8e9b70d5d9e/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-30_13:40:39_ed290b083751590ba79e3a699608c8e9b70d5d9e/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_13:40:39_ed290b083751590ba79e3a699608c8e9b70d5d9e/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-30_13:40:39_ed290b083751590ba79e3a699608c8e9b70d5d9e/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-30_13:40:39_ed290b083751590ba79e3a699608c8e9b70d5d9e/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_13:40:39_ed290b083751590ba79e3a699608c8e9b70d5d9e/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-30_13:40:39_ed290b083751590ba79e3a699608c8e9b70d5d9e/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-30_13:40:39_ed290b083751590ba79e3a699608c8e9b70d5d9e/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 6f1981a4d46bb338d7f7a7de1011b142e6713d58..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_13:40:39_ed290b083751590ba79e3a699608c8e9b70d5d9e/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.030016,0.00371,270.0 diff --git a/raw_results/2023-08-30_13:40:39_ed290b083751590ba79e3a699608c8e9b70d5d9e/pytorch_bert_inference/0/main.log b/raw_results/2023-08-30_13:40:39_ed290b083751590ba79e3a699608c8e9b70d5d9e/pytorch_bert_inference/0/main.log deleted file mode 100644 index c084478c81da12b866ec694fc7a2911c84d68033..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_13:40:39_ed290b083751590ba79e3a699608c8e9b70d5d9e/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-30 14:53:18,860][benchmark][INFO] - Configuring inference benchmark -[2023-08-30 14:53:18,861][benchmark][INFO] - + Setting seed(42) -[2023-08-30 14:53:20,610][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-30 14:53:20,610][backend][INFO] - Configuring pytorch backend -[2023-08-30 14:53:20,610][backend][INFO] - + Checking initial device isolation -[2023-08-30 14:53:20,610][backend][INFO] - + Checking continuous device isolation -[2023-08-30 14:53:20,610][pytorch][INFO] - + Disabling gradients -[2023-08-30 14:53:20,611][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-30 14:53:21,220][pytorch][INFO] - + Turning on eval mode -[2023-08-30 14:53:21,220][inference][INFO] - Running inference benchmark -[2023-08-30 14:53:21,344][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-30 14:53:21,345][inference][INFO] - + Tracking forward pass peak
memory -[2023-08-30 14:53:21,410][inference][INFO] - + Forward pass peak memory: 467.030016 (MB) -[2023-08-30 14:53:21,411][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-30 14:53:21,413][inference][INFO] - + Warming up the forward pass -[2023-08-30 14:53:21,451][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-30 14:53:26,497][inference][INFO] - + Forward pass latency: 3.71e-03 (s) -[2023-08-30 14:53:26,498][inference][INFO] - + Forward pass throughput: 270.00 (samples/s) -[2023-08-30 14:53:26,498][inference][INFO] - Saving inference results -[2023-08-30 14:53:26,509][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-30_13:40:39_ed290b083751590ba79e3a699608c8e9b70d5d9e/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-30_13:40:39_ed290b083751590ba79e3a699608c8e9b70d5d9e/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_13:40:39_ed290b083751590ba79e3a699608c8e9b70d5d9e/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-30_13:40:39_ed290b083751590ba79e3a699608c8e9b70d5d9e/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-30_13:40:39_ed290b083751590ba79e3a699608c8e9b70d5d9e/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 449f565e49aec5bab9485c3ad302c4c2deb7475a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_13:40:39_ed290b083751590ba79e3a699608c8e9b70d5d9e/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-30_13:40:39_ed290b083751590ba79e3a699608c8e9b70d5d9e/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-30_13:40:39_ed290b083751590ba79e3a699608c8e9b70d5d9e/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-08-30_13:40:39_ed290b083751590ba79e3a699608c8e9b70d5d9e/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_13:40:39_ed290b083751590ba79e3a699608c8e9b70d5d9e/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-30_13:40:39_ed290b083751590ba79e3a699608c8e9b70d5d9e/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-30_13:40:39_ed290b083751590ba79e3a699608c8e9b70d5d9e/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_13:40:39_ed290b083751590ba79e3a699608c8e9b70d5d9e/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-30_13:40:39_ed290b083751590ba79e3a699608c8e9b70d5d9e/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-30_13:40:39_ed290b083751590ba79e3a699608c8e9b70d5d9e/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index e7caf68147b4588cc4f6f2fb3ebe50ed51df2254..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_13:40:39_ed290b083751590ba79e3a699608c8e9b70d5d9e/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.000768,0.00412,971.0 diff --git a/raw_results/2023-08-30_13:40:39_ed290b083751590ba79e3a699608c8e9b70d5d9e/pytorch_bert_inference/1/main.log b/raw_results/2023-08-30_13:40:39_ed290b083751590ba79e3a699608c8e9b70d5d9e/pytorch_bert_inference/1/main.log deleted file mode 100644 index 7e4337634a79b7d9c0dd572281a7084d96b6ec34..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-30_13:40:39_ed290b083751590ba79e3a699608c8e9b70d5d9e/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-30 14:53:27,116][benchmark][INFO] - Configuring inference benchmark -[2023-08-30 14:53:27,117][benchmark][INFO] - + Setting seed(42) -[2023-08-30 14:53:27,979][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-30 14:53:27,980][backend][INFO] - Configuring pytorch backend -[2023-08-30 14:53:27,980][backend][INFO] - + Checking initial device isolation -[2023-08-30 14:53:27,980][backend][INFO] - + Checking continuous device isolation -[2023-08-30 14:53:27,980][pytorch][INFO] - + Disabling gradients -[2023-08-30 14:53:27,981][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-30 14:53:28,098][pytorch][INFO] - + Turning on eval mode -[2023-08-30 14:53:28,098][inference][INFO] - Running inference benchmark -[2023-08-30 14:53:28,237][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-30 14:53:28,238][inference][INFO] - + Tracking forward pass peak memory -[2023-08-30 14:53:28,287][inference][INFO] - + Forward pass peak memory: 468.000768 (MB) -[2023-08-30 14:53:28,288][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-30 14:53:28,290][inference][INFO] - + Warming up the forward pass -[2023-08-30 14:53:28,332][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-30 14:53:33,374][inference][INFO] - + Forward pass latency: 4.12e-03 (s) -[2023-08-30 14:53:33,375][inference][INFO] - + Forward pass throughput: 971.00 (samples/s) -[2023-08-30 14:53:33,375][inference][INFO] - Saving inference results -[2023-08-30 14:53:33,383][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-30_13:40:39_ed290b083751590ba79e3a699608c8e9b70d5d9e/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-30_13:40:39_ed290b083751590ba79e3a699608c8e9b70d5d9e/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_13:40:39_ed290b083751590ba79e3a699608c8e9b70d5d9e/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model:
hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-30_13:40:39_ed290b083751590ba79e3a699608c8e9b70d5d9e/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-30_13:40:39_ed290b083751590ba79e3a699608c8e9b70d5d9e/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 09208d9cf429889ed2ebc564993a3c972488561c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_13:40:39_ed290b083751590ba79e3a699608c8e9b70d5d9e/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-30_13:40:39_ed290b083751590ba79e3a699608c8e9b70d5d9e/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-30_13:40:39_ed290b083751590ba79e3a699608c8e9b70d5d9e/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-30_13:40:39_ed290b083751590ba79e3a699608c8e9b70d5d9e/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_13:40:39_ed290b083751590ba79e3a699608c8e9b70d5d9e/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-30_13:40:39_ed290b083751590ba79e3a699608c8e9b70d5d9e/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-30_13:40:39_ed290b083751590ba79e3a699608c8e9b70d5d9e/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_13:40:39_ed290b083751590ba79e3a699608c8e9b70d5d9e/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-30_13:40:39_ed290b083751590ba79e3a699608c8e9b70d5d9e/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-30_13:40:39_ed290b083751590ba79e3a699608c8e9b70d5d9e/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index cff6474277ae96bd61a849cf8b44d4152abe0d57..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_13:40:39_ed290b083751590ba79e3a699608c8e9b70d5d9e/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.13536,0.00391,256.0,0.5,200.0 diff --git a/raw_results/2023-08-30_13:40:39_ed290b083751590ba79e3a699608c8e9b70d5d9e/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-30_13:40:39_ed290b083751590ba79e3a699608c8e9b70d5d9e/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index b5059e63c0a20f3a6432d7c110444f69fa81babb..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_13:40:39_ed290b083751590ba79e3a699608c8e9b70d5d9e/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-30 14:53:38,163][benchmark][INFO] - Configuring inference benchmark -[2023-08-30 14:53:38,164][benchmark][INFO] - + Setting seed(42) -[2023-08-30 14:53:39,611][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-30 14:53:39,612][backend][INFO] - Configuring pytorch backend -[2023-08-30 14:53:39,612][backend][INFO] - + Checking initial device isolation -[2023-08-30 14:53:39,612][backend][INFO] - + Checking continuous device isolation -[2023-08-30 14:53:39,612][pytorch][INFO] - + Disabling gradients -[2023-08-30 14:53:39,612][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-30 14:53:40,297][pytorch][INFO] - + Turning on eval mode -[2023-08-30 14:53:40,298][inference][INFO] - Running inference benchmark -[2023-08-30 14:53:40,497][inference][INFO] - + Tracking forward pass peak memory -[2023-08-30 14:53:40,548][inference][INFO] - + Forward pass peak memory: 469.13536 (MB) -[2023-08-30 14:53:40,550][inference][INFO] - + Warming up the forward pass
-[2023-08-30 14:53:40,584][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-30 14:53:45,627][inference][INFO] - + Forward pass latency: 3.91e-03 (s) -[2023-08-30 14:53:45,628][inference][INFO] - + Forward pass throughput: 256.00 (samples/s) -[2023-08-30 14:53:45,629][inference][INFO] - + Warming up the generation pass -[2023-08-30 14:53:46,181][inference][INFO] - + Tracking generation latency and throughput -[2023-08-30 14:53:51,187][inference][INFO] - + Generation pass latency: 5.00e-01 (s) -[2023-08-30 14:53:51,188][inference][INFO] - + Generation pass throughput: 200.00 (tokens/s) -[2023-08-30 14:53:51,188][inference][INFO] - Saving inference results -[2023-08-30 14:53:51,200][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-30_14:11:54_f73c20970c5cf575dd341d18216c42bec0b8a0e5/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-30_14:11:54_f73c20970c5cf575dd341d18216c42bec0b8a0e5/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_14:11:54_f73c20970c5cf575dd341d18216c42bec0b8a0e5/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-30_14:11:54_f73c20970c5cf575dd341d18216c42bec0b8a0e5/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-30_14:11:54_f73c20970c5cf575dd341d18216c42bec0b8a0e5/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 3a599af0eb23619be23926697dff230f6e4aa010..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_14:11:54_f73c20970c5cf575dd341d18216c42bec0b8a0e5/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-30_14:11:54_f73c20970c5cf575dd341d18216c42bec0b8a0e5/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-30_14:11:54_f73c20970c5cf575dd341d18216c42bec0b8a0e5/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-30_14:11:54_f73c20970c5cf575dd341d18216c42bec0b8a0e5/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_14:11:54_f73c20970c5cf575dd341d18216c42bec0b8a0e5/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-30_14:11:54_f73c20970c5cf575dd341d18216c42bec0b8a0e5/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-30_14:11:54_f73c20970c5cf575dd341d18216c42bec0b8a0e5/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_14:11:54_f73c20970c5cf575dd341d18216c42bec0b8a0e5/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-30_14:11:54_f73c20970c5cf575dd341d18216c42bec0b8a0e5/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-30_14:11:54_f73c20970c5cf575dd341d18216c42bec0b8a0e5/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 2531cb6f9f8c98061382e44e5834109b22ef2a72..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_14:11:54_f73c20970c5cf575dd341d18216c42bec0b8a0e5/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.06278399999997,0.00388,258.0 diff --git a/raw_results/2023-08-30_14:11:54_f73c20970c5cf575dd341d18216c42bec0b8a0e5/pytorch_bert_inference/0/main.log b/raw_results/2023-08-30_14:11:54_f73c20970c5cf575dd341d18216c42bec0b8a0e5/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
d9745b1c66083fa8e2189428052397d8364e5272..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_14:11:54_f73c20970c5cf575dd341d18216c42bec0b8a0e5/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-30 14:54:59,724][benchmark][INFO] - Configuring inference benchmark -[2023-08-30 14:54:59,726][benchmark][INFO] - + Setting seed(42) -[2023-08-30 14:55:00,940][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-30 14:55:00,940][backend][INFO] - Configuring pytorch backend -[2023-08-30 14:55:00,940][backend][INFO] - + Checking initial device isolation -[2023-08-30 14:55:00,941][backend][INFO] - + Checking continuous device isolation -[2023-08-30 14:55:00,941][pytorch][INFO] - + Disabling gradients -[2023-08-30 14:55:00,941][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-30 14:55:01,584][pytorch][INFO] - + Turning on eval mode -[2023-08-30 14:55:01,585][inference][INFO] - Running inference benchmark -[2023-08-30 14:55:01,711][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-30 14:55:01,712][inference][INFO] - + Tracking forward pass peak memory -[2023-08-30 14:55:01,779][inference][INFO] - + Forward pass peak memory: 467.06278399999997 (MB) -[2023-08-30 14:55:01,780][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-30 14:55:01,782][inference][INFO] - + Warming up the forward pass -[2023-08-30 14:55:01,814][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-30 14:55:06,861][inference][INFO] - + Forward pass latency: 3.88e-03 (s) -[2023-08-30 14:55:06,862][inference][INFO] - + Forward pass throughput: 258.00 (samples/s) -[2023-08-30 14:55:06,862][inference][INFO] - Saving inference results -[2023-08-30 14:55:06,873][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-30_14:11:54_f73c20970c5cf575dd341d18216c42bec0b8a0e5/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-30_14:11:54_f73c20970c5cf575dd341d18216c42bec0b8a0e5/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_14:11:54_f73c20970c5cf575dd341d18216c42bec0b8a0e5/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 -
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-30_14:11:54_f73c20970c5cf575dd341d18216c42bec0b8a0e5/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-30_14:11:54_f73c20970c5cf575dd341d18216c42bec0b8a0e5/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 7822280d4e78805f418b0f49df6f85518fba4ce6..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_14:11:54_f73c20970c5cf575dd341d18216c42bec0b8a0e5/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-30_14:11:54_f73c20970c5cf575dd341d18216c42bec0b8a0e5/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-30_14:11:54_f73c20970c5cf575dd341d18216c42bec0b8a0e5/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-30_14:11:54_f73c20970c5cf575dd341d18216c42bec0b8a0e5/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_14:11:54_f73c20970c5cf575dd341d18216c42bec0b8a0e5/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-30_14:11:54_f73c20970c5cf575dd341d18216c42bec0b8a0e5/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-30_14:11:54_f73c20970c5cf575dd341d18216c42bec0b8a0e5/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_14:11:54_f73c20970c5cf575dd341d18216c42bec0b8a0e5/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-30_14:11:54_f73c20970c5cf575dd341d18216c42bec0b8a0e5/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-30_14:11:54_f73c20970c5cf575dd341d18216c42bec0b8a0e5/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 5f9987c6ba592f987425ed407e6cdcd53626d5fb..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_14:11:54_f73c20970c5cf575dd341d18216c42bec0b8a0e5/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.92704,0.00434,922.0 diff --git a/raw_results/2023-08-30_14:11:54_f73c20970c5cf575dd341d18216c42bec0b8a0e5/pytorch_bert_inference/1/main.log b/raw_results/2023-08-30_14:11:54_f73c20970c5cf575dd341d18216c42bec0b8a0e5/pytorch_bert_inference/1/main.log deleted file mode 100644 index bfa2cb5a121c947fe9e974e1c02e949e13a436de..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_14:11:54_f73c20970c5cf575dd341d18216c42bec0b8a0e5/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-30 14:55:07,263][benchmark][INFO] - Configuring inference benchmark -[2023-08-30 14:55:07,264][benchmark][INFO] - + Setting seed(42) -[2023-08-30 14:55:07,772][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-30 14:55:07,772][backend][INFO] - Configuring pytorch backend -[2023-08-30 14:55:07,772][backend][INFO] - + Checking initial device isolation -[2023-08-30 14:55:07,773][backend][INFO] - + Checking continuous device isolation -[2023-08-30 14:55:07,773][pytorch][INFO] - + Disabling gradients -[2023-08-30 14:55:07,773][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-30 14:55:07,897][pytorch][INFO] - + Turning on eval mode -[2023-08-30 14:55:07,897][inference][INFO] - Running inference benchmark -[2023-08-30 14:55:08,065][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-30 14:55:08,066][inference][INFO] - + Tracking forward pass peak
memory -[2023-08-30 14:55:08,116][inference][INFO] - + Forward pass peak memory: 467.92704 (MB) -[2023-08-30 14:55:08,117][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-30 14:55:08,118][inference][INFO] - + Warming up the forward pass -[2023-08-30 14:55:08,163][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-30 14:55:13,203][inference][INFO] - + Forward pass latency: 4.34e-03 (s) -[2023-08-30 14:55:13,204][inference][INFO] - + Forward pass throughput: 922.00 (samples/s) -[2023-08-30 14:55:13,204][inference][INFO] - Saving inference results -[2023-08-30 14:55:13,212][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-30_14:11:54_f73c20970c5cf575dd341d18216c42bec0b8a0e5/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-30_14:11:54_f73c20970c5cf575dd341d18216c42bec0b8a0e5/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_14:11:54_f73c20970c5cf575dd341d18216c42bec0b8a0e5/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-30_14:11:54_f73c20970c5cf575dd341d18216c42bec0b8a0e5/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-30_14:11:54_f73c20970c5cf575dd341d18216c42bec0b8a0e5/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 6865e8dbcba8be7429450840d018db0003ac97e5..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_14:11:54_f73c20970c5cf575dd341d18216c42bec0b8a0e5/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-30_14:11:54_f73c20970c5cf575dd341d18216c42bec0b8a0e5/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-30_14:11:54_f73c20970c5cf575dd341d18216c42bec0b8a0e5/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-30_14:11:54_f73c20970c5cf575dd341d18216c42bec0b8a0e5/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_14:11:54_f73c20970c5cf575dd341d18216c42bec0b8a0e5/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-30_14:11:54_f73c20970c5cf575dd341d18216c42bec0b8a0e5/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-30_14:11:54_f73c20970c5cf575dd341d18216c42bec0b8a0e5/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_14:11:54_f73c20970c5cf575dd341d18216c42bec0b8a0e5/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-30_14:11:54_f73c20970c5cf575dd341d18216c42bec0b8a0e5/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-30_14:11:54_f73c20970c5cf575dd341d18216c42bec0b8a0e5/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 95168d4bcf2428aa36aace0333b01908a99a445b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_14:11:54_f73c20970c5cf575dd341d18216c42bec0b8a0e5/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.262336,0.00323,310.0,0.491,204.0 diff --git a/raw_results/2023-08-30_14:11:54_f73c20970c5cf575dd341d18216c42bec0b8a0e5/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-30_14:11:54_f73c20970c5cf575dd341d18216c42bec0b8a0e5/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 4b28620d60967c2f22eb3ffc941f48bda521914b..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-30_14:11:54_f73c20970c5cf575dd341d18216c42bec0b8a0e5/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-30 14:55:17,951][benchmark][INFO] - Configuring inference benchmark -[2023-08-30 14:55:17,953][benchmark][INFO] - + Setting seed(42) -[2023-08-30 14:55:19,434][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-30 14:55:19,434][backend][INFO] - Configuring pytorch backend -[2023-08-30 14:55:19,434][backend][INFO] - + Checking initial device isolation -[2023-08-30 14:55:19,434][backend][INFO] - + Checking contineous device isolation -[2023-08-30 14:55:19,435][pytorch][INFO] - + Disabling gradients -[2023-08-30 14:55:19,435][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-30 14:55:20,086][pytorch][INFO] - + Turning on eval mode -[2023-08-30 14:55:20,087][inference][INFO] - Running inference benchmark -[2023-08-30 14:55:20,301][inference][INFO] - + Tracking forward pass peak memory -[2023-08-30 14:55:20,350][inference][INFO] - + Forward pass peak memory: 469.262336 (MB) -[2023-08-30 14:55:20,352][inference][INFO] - + Warming up the forward pass -[2023-08-30 14:55:20,385][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-30 14:55:25,433][inference][INFO] - + Forward pass latency: 3.23e-03 (s) -[2023-08-30 14:55:25,435][inference][INFO] - + Forward pass throughput: 310.00 (samples/s) -[2023-08-30 14:55:25,435][inference][INFO] - + Warming up the generation pass -[2023-08-30 14:55:25,937][inference][INFO] - + Tracking generation latency and throughput -[2023-08-30 14:55:31,341][inference][INFO] - + Generation pass latency: 4.91e-01 (s) -[2023-08-30 14:55:31,342][inference][INFO] - + Generation pass throughput: 204.00 (tokens/s) -[2023-08-30 14:55:31,342][inference][INFO] - Saving inference results -[2023-08-30 14:55:31,354][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-30_15:00:36_72298178bcbb5f3cb34af5283ac36dad8b869fb5/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-30_15:00:36_72298178bcbb5f3cb34af5283ac36dad8b869fb5/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_15:00:36_72298178bcbb5f3cb34af5283ac36dad8b869fb5/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-30_15:00:36_72298178bcbb5f3cb34af5283ac36dad8b869fb5/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-30_15:00:36_72298178bcbb5f3cb34af5283ac36dad8b869fb5/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 46ec4a553369567c64f080b2277654015f075bae..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_15:00:36_72298178bcbb5f3cb34af5283ac36dad8b869fb5/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
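(Editor's note on the paired config files above: each run stores two copies of its configuration. config.yaml keeps the unresolved OmegaConf interpolations, e.g. disable_grad: ${is_inference:${benchmark.name}}, while hydra_config.yaml stores the resolved values, disable_grad: true and eval_mode: true, since the benchmark name here is "inference". A minimal OmegaConf sketch of how such a custom resolver behaves; the resolver registration is illustrative, standing in for whatever optimum-benchmark registers internally:

from omegaconf import OmegaConf

# A custom resolver so ${is_inference:...} evaluates to a boolean.
OmegaConf.register_new_resolver("is_inference", lambda name: name == "inference")

cfg = OmegaConf.create({
    "benchmark": {"name": "inference"},
    "backend": {"disable_grad": "${is_inference:${benchmark.name}}"},
})

print(cfg.backend.disable_grad)  # True -- the value hydra_config.yaml records

This is why the deleted config.yaml and hydra_config.yaml files differ only in those two keys.)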
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-30_15:00:36_72298178bcbb5f3cb34af5283ac36dad8b869fb5/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-30_15:00:36_72298178bcbb5f3cb34af5283ac36dad8b869fb5/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-30_15:00:36_72298178bcbb5f3cb34af5283ac36dad8b869fb5/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_15:00:36_72298178bcbb5f3cb34af5283ac36dad8b869fb5/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-30_15:00:36_72298178bcbb5f3cb34af5283ac36dad8b869fb5/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-30_15:00:36_72298178bcbb5f3cb34af5283ac36dad8b869fb5/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_15:00:36_72298178bcbb5f3cb34af5283ac36dad8b869fb5/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-30_15:00:36_72298178bcbb5f3cb34af5283ac36dad8b869fb5/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-30_15:00:36_72298178bcbb5f3cb34af5283ac36dad8b869fb5/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 8bae50e3f00af7153451b33108c610a24e5cc8c3..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_15:00:36_72298178bcbb5f3cb34af5283ac36dad8b869fb5/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.874368,0.0038,263.0 diff --git a/raw_results/2023-08-30_15:00:36_72298178bcbb5f3cb34af5283ac36dad8b869fb5/pytorch_bert_inference/0/main.log b/raw_results/2023-08-30_15:00:36_72298178bcbb5f3cb34af5283ac36dad8b869fb5/pytorch_bert_inference/0/main.log deleted file mode 100644 index 8a2ec1b900a972120b3f31e13da5295ddce5b1ae..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_15:00:36_72298178bcbb5f3cb34af5283ac36dad8b869fb5/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-30 16:49:58,931][benchmark][INFO] - Configuring inference benchmark -[2023-08-30 16:49:58,932][benchmark][INFO] - + Setting seed(42) -[2023-08-30 16:50:00,122][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-30 16:50:00,122][backend][INFO] - Configuring pytorch backend -[2023-08-30 16:50:00,122][backend][INFO] - + Checking initial device isolation -[2023-08-30 16:50:00,122][backend][INFO] - + Checking contineous device isolation -[2023-08-30 16:50:00,123][pytorch][INFO] - + Disabling gradients -[2023-08-30 16:50:00,123][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-30 16:50:00,737][pytorch][INFO] - + Turning on eval mode -[2023-08-30 16:50:00,738][inference][INFO] - Running inference benchmark -[2023-08-30 16:50:00,858][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-30 16:50:00,860][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-30 16:50:00,922][inference][INFO] - + Forward pass peak memory: 466.874368 (MB) -[2023-08-30 16:50:00,923][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-30 16:50:00,925][inference][INFO] - + Warming up the forward pass -[2023-08-30 16:50:00,963][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-30 16:50:06,009][inference][INFO] - + Forward pass latency: 3.80e-03 (s) -[2023-08-30 16:50:06,010][inference][INFO] - + Forward pass throughput: 263.00 (samples/s) -[2023-08-30 16:50:06,011][inference][INFO] - Saving inference results -[2023-08-30 16:50:06,020][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-30_15:00:36_72298178bcbb5f3cb34af5283ac36dad8b869fb5/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-30_15:00:36_72298178bcbb5f3cb34af5283ac36dad8b869fb5/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_15:00:36_72298178bcbb5f3cb34af5283ac36dad8b869fb5/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-30_15:00:36_72298178bcbb5f3cb34af5283ac36dad8b869fb5/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-30_15:00:36_72298178bcbb5f3cb34af5283ac36dad8b869fb5/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 3947eb99c1c1881725faed43d48be1195cc30062..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_15:00:36_72298178bcbb5f3cb34af5283ac36dad8b869fb5/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-30_15:00:36_72298178bcbb5f3cb34af5283ac36dad8b869fb5/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-30_15:00:36_72298178bcbb5f3cb34af5283ac36dad8b869fb5/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-08-30_15:00:36_72298178bcbb5f3cb34af5283ac36dad8b869fb5/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_15:00:36_72298178bcbb5f3cb34af5283ac36dad8b869fb5/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-30_15:00:36_72298178bcbb5f3cb34af5283ac36dad8b869fb5/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-30_15:00:36_72298178bcbb5f3cb34af5283ac36dad8b869fb5/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_15:00:36_72298178bcbb5f3cb34af5283ac36dad8b869fb5/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-30_15:00:36_72298178bcbb5f3cb34af5283ac36dad8b869fb5/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-30_15:00:36_72298178bcbb5f3cb34af5283ac36dad8b869fb5/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 9fd5d533701964f31bf44009a47c59107895b001..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_15:00:36_72298178bcbb5f3cb34af5283ac36dad8b869fb5/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.820544,0.00429,932.0 diff --git a/raw_results/2023-08-30_15:00:36_72298178bcbb5f3cb34af5283ac36dad8b869fb5/pytorch_bert_inference/1/main.log b/raw_results/2023-08-30_15:00:36_72298178bcbb5f3cb34af5283ac36dad8b869fb5/pytorch_bert_inference/1/main.log deleted file mode 100644 index ddc31974d834328122d165960b9276b0acf23a24..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-30_15:00:36_72298178bcbb5f3cb34af5283ac36dad8b869fb5/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-30 16:50:06,397][benchmark][INFO] - Configuring inference benchmark -[2023-08-30 16:50:06,399][benchmark][INFO] - + Setting seed(42) -[2023-08-30 16:50:06,834][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-30 16:50:06,835][backend][INFO] - Configuring pytorch backend -[2023-08-30 16:50:06,835][backend][INFO] - + Checking initial device isolation -[2023-08-30 16:50:06,835][backend][INFO] - + Checking contineous device isolation -[2023-08-30 16:50:06,835][pytorch][INFO] - + Disabling gradients -[2023-08-30 16:50:06,835][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-30 16:50:06,955][pytorch][INFO] - + Turning on eval mode -[2023-08-30 16:50:06,956][inference][INFO] - Running inference benchmark -[2023-08-30 16:50:07,079][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-30 16:50:07,080][inference][INFO] - + Tracking forward pass peak memory -[2023-08-30 16:50:07,127][inference][INFO] - + Forward pass peak memory: 467.820544 (MB) -[2023-08-30 16:50:07,128][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-30 16:50:07,130][inference][INFO] - + Warming up the forward pass -[2023-08-30 16:50:07,173][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-30 16:50:12,215][inference][INFO] - + Forward pass latency: 4.29e-03 (s) -[2023-08-30 16:50:12,216][inference][INFO] - + Forward pass throughput: 932.00 (samples/s) -[2023-08-30 16:50:12,216][inference][INFO] - Saving inference results -[2023-08-30 16:50:12,223][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-30_15:00:36_72298178bcbb5f3cb34af5283ac36dad8b869fb5/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-30_15:00:36_72298178bcbb5f3cb34af5283ac36dad8b869fb5/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_15:00:36_72298178bcbb5f3cb34af5283ac36dad8b869fb5/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: 
hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-30_15:00:36_72298178bcbb5f3cb34af5283ac36dad8b869fb5/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-30_15:00:36_72298178bcbb5f3cb34af5283ac36dad8b869fb5/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 268562bec4c7906137305b7d85aca232be81bcdd..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_15:00:36_72298178bcbb5f3cb34af5283ac36dad8b869fb5/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-30_15:00:36_72298178bcbb5f3cb34af5283ac36dad8b869fb5/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-30_15:00:36_72298178bcbb5f3cb34af5283ac36dad8b869fb5/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-30_15:00:36_72298178bcbb5f3cb34af5283ac36dad8b869fb5/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_15:00:36_72298178bcbb5f3cb34af5283ac36dad8b869fb5/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-30_15:00:36_72298178bcbb5f3cb34af5283ac36dad8b869fb5/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-30_15:00:36_72298178bcbb5f3cb34af5283ac36dad8b869fb5/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_15:00:36_72298178bcbb5f3cb34af5283ac36dad8b869fb5/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-30_15:00:36_72298178bcbb5f3cb34af5283ac36dad8b869fb5/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-30_15:00:36_72298178bcbb5f3cb34af5283ac36dad8b869fb5/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 814c9a15a67cff8149ac2cec61aa49f77be0d614..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_15:00:36_72298178bcbb5f3cb34af5283ac36dad8b869fb5/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.426176,0.00376,266.0,0.489,204.0 diff --git a/raw_results/2023-08-30_15:00:36_72298178bcbb5f3cb34af5283ac36dad8b869fb5/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-30_15:00:36_72298178bcbb5f3cb34af5283ac36dad8b869fb5/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 33c44bc1853a53564b1e4e9b903ed63d1290e50f..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_15:00:36_72298178bcbb5f3cb34af5283ac36dad8b869fb5/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-30 16:50:17,203][benchmark][INFO] - Configuring inference benchmark -[2023-08-30 16:50:17,204][benchmark][INFO] - + Setting seed(42) -[2023-08-30 16:50:18,656][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-30 16:50:18,656][backend][INFO] - Configuring pytorch backend -[2023-08-30 16:50:18,657][backend][INFO] - + Checking initial device isolation -[2023-08-30 16:50:18,657][backend][INFO] - + Checking contineous device isolation -[2023-08-30 16:50:18,657][pytorch][INFO] - + Disabling gradients -[2023-08-30 16:50:18,657][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-30 16:50:19,302][pytorch][INFO] - + Turning on eval mode -[2023-08-30 16:50:19,302][inference][INFO] - Running inference benchmark -[2023-08-30 16:50:19,503][inference][INFO] - + Tracking forward pass peak memory -[2023-08-30 16:50:19,553][inference][INFO] - + Forward pass peak memory: 469.426176 (MB) -[2023-08-30 16:50:19,554][inference][INFO] - + Warming up the forward pass 
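(Editor's note: the main.log files removed throughout this diff all follow the same measurement sequence: track forward-pass peak memory, warm up, time the forward pass — and, for text-generation tasks, the generation pass — over the configured benchmark_duration of 5 s, then report throughput derived from the mean latency. A minimal sketch of that derivation, with function names of my own choosing rather than optimum-benchmark internals, cross-checked against the figures in the surrounding logs:

# Throughput in these logs is implied by mean latency over the 5 s window:
#   forward pass:    samples/s = batch_size / latency
#   generation pass: tokens/s  = (new_tokens * batch_size) / latency

def forward_throughput(batch_size: int, latency_s: float) -> float:
    """Samples per second implied by one forward-pass latency."""
    return batch_size / latency_s

def generate_throughput(new_tokens: int, batch_size: int, latency_s: float) -> float:
    """Tokens per second implied by one generation-pass latency."""
    return new_tokens * batch_size / latency_s

# Checked against the gpt2 run logged below (batch_size=1, new_tokens=100):
assert round(forward_throughput(1, 3.76e-03)) == 266      # logged: 266.00 samples/s
assert round(generate_throughput(100, 1, 0.489)) == 204   # logged: 204.00 tokens/s

The same arithmetic explains the batch_size=4 bert run logged earlier: 4 / 4.29e-03 s ≈ 932 samples/s, matching the 932.00 in that log.)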
-[2023-08-30 16:50:19,587][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-30 16:50:24,630][inference][INFO] - + Forward pass latency: 3.76e-03 (s) -[2023-08-30 16:50:24,631][inference][INFO] - + Forward pass throughput: 266.00 (samples/s) -[2023-08-30 16:50:24,632][inference][INFO] - + Warming up the generation pass -[2023-08-30 16:50:25,132][inference][INFO] - + Tracking generation latency and throughput -[2023-08-30 16:50:30,511][inference][INFO] - + Generation pass latency: 4.89e-01 (s) -[2023-08-30 16:50:30,512][inference][INFO] - + Generation pass throughput: 204.00 (tokens/s) -[2023-08-30 16:50:30,513][inference][INFO] - Saving inference results -[2023-08-30 16:50:30,523][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-30_15:23:44_459bc6738c162511fabf5b9102171db1fc8bb53e/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-30_15:23:44_459bc6738c162511fabf5b9102171db1fc8bb53e/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_15:23:44_459bc6738c162511fabf5b9102171db1fc8bb53e/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-30_15:23:44_459bc6738c162511fabf5b9102171db1fc8bb53e/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-30_15:23:44_459bc6738c162511fabf5b9102171db1fc8bb53e/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 9d9e1de6d2c10afb7c1f7c3b8a4bfec9fb9e75da..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_15:23:44_459bc6738c162511fabf5b9102171db1fc8bb53e/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-30_15:23:44_459bc6738c162511fabf5b9102171db1fc8bb53e/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-30_15:23:44_459bc6738c162511fabf5b9102171db1fc8bb53e/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-30_15:23:44_459bc6738c162511fabf5b9102171db1fc8bb53e/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_15:23:44_459bc6738c162511fabf5b9102171db1fc8bb53e/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-30_15:23:44_459bc6738c162511fabf5b9102171db1fc8bb53e/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-30_15:23:44_459bc6738c162511fabf5b9102171db1fc8bb53e/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_15:23:44_459bc6738c162511fabf5b9102171db1fc8bb53e/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-30_15:23:44_459bc6738c162511fabf5b9102171db1fc8bb53e/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-30_15:23:44_459bc6738c162511fabf5b9102171db1fc8bb53e/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index efbfb7198d0543cf8c914468c95dab5f9cb98223..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_15:23:44_459bc6738c162511fabf5b9102171db1fc8bb53e/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.492864,0.00314,318.0 diff --git a/raw_results/2023-08-30_15:23:44_459bc6738c162511fabf5b9102171db1fc8bb53e/pytorch_bert_inference/0/main.log b/raw_results/2023-08-30_15:23:44_459bc6738c162511fabf5b9102171db1fc8bb53e/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
aa1e47b0fddaf85db25f449b544ecd6c96ac31cc..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_15:23:44_459bc6738c162511fabf5b9102171db1fc8bb53e/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-30 16:51:41,884][benchmark][INFO] - Configuring inference benchmark -[2023-08-30 16:51:41,886][benchmark][INFO] - + Setting seed(42) -[2023-08-30 16:51:43,148][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-30 16:51:43,148][backend][INFO] - Configuring pytorch backend -[2023-08-30 16:51:43,149][backend][INFO] - + Checking initial device isolation -[2023-08-30 16:51:43,149][backend][INFO] - + Checking contineous device isolation -[2023-08-30 16:51:43,149][pytorch][INFO] - + Disabling gradients -[2023-08-30 16:51:43,149][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-30 16:51:43,760][pytorch][INFO] - + Turning on eval mode -[2023-08-30 16:51:43,760][inference][INFO] - Running inference benchmark -[2023-08-30 16:51:43,883][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-30 16:51:43,885][inference][INFO] - + Tracking forward pass peak memory -[2023-08-30 16:51:43,944][inference][INFO] - + Forward pass peak memory: 467.492864 (MB) -[2023-08-30 16:51:43,945][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-30 16:51:43,947][inference][INFO] - + Warming up the forward pass -[2023-08-30 16:51:43,980][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-30 16:51:49,034][inference][INFO] - + Forward pass latency: 3.14e-03 (s) -[2023-08-30 16:51:49,036][inference][INFO] - + Forward pass throughput: 318.00 (samples/s) -[2023-08-30 16:51:49,036][inference][INFO] - Saving inference results -[2023-08-30 16:51:49,048][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-30_15:23:44_459bc6738c162511fabf5b9102171db1fc8bb53e/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-30_15:23:44_459bc6738c162511fabf5b9102171db1fc8bb53e/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_15:23:44_459bc6738c162511fabf5b9102171db1fc8bb53e/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-30_15:23:44_459bc6738c162511fabf5b9102171db1fc8bb53e/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-30_15:23:44_459bc6738c162511fabf5b9102171db1fc8bb53e/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 84c71117686ef60db279685baa8ad810c6419425..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_15:23:44_459bc6738c162511fabf5b9102171db1fc8bb53e/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-30_15:23:44_459bc6738c162511fabf5b9102171db1fc8bb53e/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-30_15:23:44_459bc6738c162511fabf5b9102171db1fc8bb53e/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-30_15:23:44_459bc6738c162511fabf5b9102171db1fc8bb53e/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_15:23:44_459bc6738c162511fabf5b9102171db1fc8bb53e/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-30_15:23:44_459bc6738c162511fabf5b9102171db1fc8bb53e/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-30_15:23:44_459bc6738c162511fabf5b9102171db1fc8bb53e/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_15:23:44_459bc6738c162511fabf5b9102171db1fc8bb53e/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-30_15:23:44_459bc6738c162511fabf5b9102171db1fc8bb53e/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-30_15:23:44_459bc6738c162511fabf5b9102171db1fc8bb53e/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 796b333ccb4ea43c30213b3d2a96e8ab69ad5db3..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_15:23:44_459bc6738c162511fabf5b9102171db1fc8bb53e/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.52915199999995,0.0035,1140.0 diff --git a/raw_results/2023-08-30_15:23:44_459bc6738c162511fabf5b9102171db1fc8bb53e/pytorch_bert_inference/1/main.log b/raw_results/2023-08-30_15:23:44_459bc6738c162511fabf5b9102171db1fc8bb53e/pytorch_bert_inference/1/main.log deleted file mode 100644 index 1225e1037db1ef9b0c4267cd18159accd7b6075c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_15:23:44_459bc6738c162511fabf5b9102171db1fc8bb53e/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-30 16:51:49,422][benchmark][INFO] - Configuring inference benchmark -[2023-08-30 16:51:49,423][benchmark][INFO] - + Setting seed(42) -[2023-08-30 16:51:49,867][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-30 16:51:49,867][backend][INFO] - Configuring pytorch backend -[2023-08-30 16:51:49,868][backend][INFO] - + Checking initial device isolation -[2023-08-30 16:51:49,868][backend][INFO] - + Checking contineous device isolation -[2023-08-30 16:51:49,868][pytorch][INFO] - + Disabling gradients -[2023-08-30 16:51:49,868][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-30 16:51:49,983][pytorch][INFO] - + Turning on eval mode -[2023-08-30 16:51:49,984][inference][INFO] - Running inference benchmark -[2023-08-30 16:51:50,167][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-30 16:51:50,168][inference][INFO] - + Tracking forward 
pass peak memory -[2023-08-30 16:51:50,211][inference][INFO] - + Forward pass peak memory: 468.52915199999995 (MB) -[2023-08-30 16:51:50,212][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-30 16:51:50,213][inference][INFO] - + Warming up the forward pass -[2023-08-30 16:51:50,250][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-30 16:51:55,295][inference][INFO] - + Forward pass latency: 3.50e-03 (s) -[2023-08-30 16:51:55,297][inference][INFO] - + Forward pass throughput: 1140.00 (samples/s) -[2023-08-30 16:51:55,297][inference][INFO] - Saving inference results -[2023-08-30 16:51:55,304][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-30_15:23:44_459bc6738c162511fabf5b9102171db1fc8bb53e/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-30_15:23:44_459bc6738c162511fabf5b9102171db1fc8bb53e/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_15:23:44_459bc6738c162511fabf5b9102171db1fc8bb53e/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-30_15:23:44_459bc6738c162511fabf5b9102171db1fc8bb53e/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-30_15:23:44_459bc6738c162511fabf5b9102171db1fc8bb53e/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 92bb112a76813390f3b81eaf41386688b905ef3e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_15:23:44_459bc6738c162511fabf5b9102171db1fc8bb53e/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - 
launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-30_15:23:44_459bc6738c162511fabf5b9102171db1fc8bb53e/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-30_15:23:44_459bc6738c162511fabf5b9102171db1fc8bb53e/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-30_15:23:44_459bc6738c162511fabf5b9102171db1fc8bb53e/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_15:23:44_459bc6738c162511fabf5b9102171db1fc8bb53e/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-30_15:23:44_459bc6738c162511fabf5b9102171db1fc8bb53e/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-30_15:23:44_459bc6738c162511fabf5b9102171db1fc8bb53e/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_15:23:44_459bc6738c162511fabf5b9102171db1fc8bb53e/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-30_15:23:44_459bc6738c162511fabf5b9102171db1fc8bb53e/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-30_15:23:44_459bc6738c162511fabf5b9102171db1fc8bb53e/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 27642e8fbed014da38fe152e4f438be1d2ff7635..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_15:23:44_459bc6738c162511fabf5b9102171db1fc8bb53e/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.245952,0.00312,321.0,0.536,187.0 diff --git a/raw_results/2023-08-30_15:23:44_459bc6738c162511fabf5b9102171db1fc8bb53e/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-30_15:23:44_459bc6738c162511fabf5b9102171db1fc8bb53e/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 5b8f5cd106f273efe41bef88cfddc9ec0c6856d9..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-30_15:23:44_459bc6738c162511fabf5b9102171db1fc8bb53e/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-30 16:52:00,179][benchmark][INFO] - Configuring inference benchmark -[2023-08-30 16:52:00,180][benchmark][INFO] - + Setting seed(42) -[2023-08-30 16:52:01,609][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-30 16:52:01,609][backend][INFO] - Configuring pytorch backend -[2023-08-30 16:52:01,609][backend][INFO] - + Checking initial device isolation -[2023-08-30 16:52:01,610][backend][INFO] - + Checking contineous device isolation -[2023-08-30 16:52:01,610][pytorch][INFO] - + Disabling gradients -[2023-08-30 16:52:01,610][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-30 16:52:02,254][pytorch][INFO] - + Turning on eval mode -[2023-08-30 16:52:02,255][inference][INFO] - Running inference benchmark -[2023-08-30 16:52:02,486][inference][INFO] - + Tracking forward pass peak memory -[2023-08-30 16:52:02,532][inference][INFO] - + Forward pass peak memory: 469.245952 (MB) -[2023-08-30 16:52:02,533][inference][INFO] - + Warming up the forward pass -[2023-08-30 16:52:02,565][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-30 16:52:07,617][inference][INFO] - + Forward pass latency: 3.12e-03 (s) -[2023-08-30 16:52:07,618][inference][INFO] - + Forward pass throughput: 321.00 (samples/s) -[2023-08-30 16:52:07,618][inference][INFO] - + Warming up the generation pass -[2023-08-30 16:52:08,118][inference][INFO] - + Tracking generation latency and throughput -[2023-08-30 16:52:13,482][inference][INFO] - + Generation pass latency: 5.36e-01 (s) -[2023-08-30 16:52:13,483][inference][INFO] - + Generation pass throughput: 187.00 (tokens/s) -[2023-08-30 16:52:13,483][inference][INFO] - Saving inference results -[2023-08-30 16:52:13,495][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-30_16:10:01_9219d1427bf3e868c76fd495bb469cf5e1542242/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-30_16:10:01_9219d1427bf3e868c76fd495bb469cf5e1542242/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_16:10:01_9219d1427bf3e868c76fd495bb469cf5e1542242/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-30_16:10:01_9219d1427bf3e868c76fd495bb469cf5e1542242/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-30_16:10:01_9219d1427bf3e868c76fd495bb469cf5e1542242/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 5e6005fc21fc27cd2383088f90a878ee2f26253a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_16:10:01_9219d1427bf3e868c76fd495bb469cf5e1542242/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-30_16:10:01_9219d1427bf3e868c76fd495bb469cf5e1542242/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-30_16:10:01_9219d1427bf3e868c76fd495bb469cf5e1542242/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-30_16:10:01_9219d1427bf3e868c76fd495bb469cf5e1542242/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_16:10:01_9219d1427bf3e868c76fd495bb469cf5e1542242/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-30_16:10:01_9219d1427bf3e868c76fd495bb469cf5e1542242/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-30_16:10:01_9219d1427bf3e868c76fd495bb469cf5e1542242/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_16:10:01_9219d1427bf3e868c76fd495bb469cf5e1542242/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-30_16:10:01_9219d1427bf3e868c76fd495bb469cf5e1542242/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-30_16:10:01_9219d1427bf3e868c76fd495bb469cf5e1542242/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 59de6358b0a483dc6fbd8fe94d7fc9d6c3273425..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_16:10:01_9219d1427bf3e868c76fd495bb469cf5e1542242/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.898944,0.00324,309.0 diff --git a/raw_results/2023-08-30_16:10:01_9219d1427bf3e868c76fd495bb469cf5e1542242/pytorch_bert_inference/0/main.log b/raw_results/2023-08-30_16:10:01_9219d1427bf3e868c76fd495bb469cf5e1542242/pytorch_bert_inference/0/main.log deleted file mode 100644 index 11eeda8dca38466f7a5052820696b4956cee2981..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_16:10:01_9219d1427bf3e868c76fd495bb469cf5e1542242/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-30 16:53:22,973][benchmark][INFO] - Configuring inference benchmark -[2023-08-30 16:53:22,974][benchmark][INFO] - + Setting seed(42) -[2023-08-30 16:53:24,461][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-30 16:53:24,462][backend][INFO] - Configuring pytorch backend -[2023-08-30 16:53:24,462][backend][INFO] - + Checking initial device isolation -[2023-08-30 16:53:24,462][backend][INFO] - + Checking contineous device isolation -[2023-08-30 16:53:24,462][pytorch][INFO] - + Disabling gradients -[2023-08-30 16:53:24,462][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-30 16:53:25,312][pytorch][INFO] - + Turning on eval mode -[2023-08-30 16:53:25,313][inference][INFO] - Running inference benchmark -[2023-08-30 16:53:25,433][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-30 16:53:25,434][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-30 16:53:25,494][inference][INFO] - + Forward pass peak memory: 466.898944 (MB) -[2023-08-30 16:53:25,495][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-30 16:53:25,497][inference][INFO] - + Warming up the forward pass -[2023-08-30 16:53:25,529][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-30 16:53:30,580][inference][INFO] - + Forward pass latency: 3.24e-03 (s) -[2023-08-30 16:53:30,581][inference][INFO] - + Forward pass throughput: 309.00 (samples/s) -[2023-08-30 16:53:30,582][inference][INFO] - Saving inference results -[2023-08-30 16:53:30,593][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-30_16:10:01_9219d1427bf3e868c76fd495bb469cf5e1542242/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-30_16:10:01_9219d1427bf3e868c76fd495bb469cf5e1542242/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_16:10:01_9219d1427bf3e868c76fd495bb469cf5e1542242/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-30_16:10:01_9219d1427bf3e868c76fd495bb469cf5e1542242/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-30_16:10:01_9219d1427bf3e868c76fd495bb469cf5e1542242/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index ad23dec1e7bee5d706312aaa7979caca0106700c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_16:10:01_9219d1427bf3e868c76fd495bb469cf5e1542242/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-30_16:10:01_9219d1427bf3e868c76fd495bb469cf5e1542242/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-30_16:10:01_9219d1427bf3e868c76fd495bb469cf5e1542242/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-08-30_16:10:01_9219d1427bf3e868c76fd495bb469cf5e1542242/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_16:10:01_9219d1427bf3e868c76fd495bb469cf5e1542242/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-30_16:10:01_9219d1427bf3e868c76fd495bb469cf5e1542242/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-30_16:10:01_9219d1427bf3e868c76fd495bb469cf5e1542242/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_16:10:01_9219d1427bf3e868c76fd495bb469cf5e1542242/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-30_16:10:01_9219d1427bf3e868c76fd495bb469cf5e1542242/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-30_16:10:01_9219d1427bf3e868c76fd495bb469cf5e1542242/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 935fb0ddf0e272cfde41c959f6d6b35f64c9c804..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_16:10:01_9219d1427bf3e868c76fd495bb469cf5e1542242/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.939328,0.00359,1110.0 diff --git a/raw_results/2023-08-30_16:10:01_9219d1427bf3e868c76fd495bb469cf5e1542242/pytorch_bert_inference/1/main.log b/raw_results/2023-08-30_16:10:01_9219d1427bf3e868c76fd495bb469cf5e1542242/pytorch_bert_inference/1/main.log deleted file mode 100644 index c2b9f475c82930662a840c22c67fd4e5e32604b5..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-30_16:10:01_9219d1427bf3e868c76fd495bb469cf5e1542242/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-30 16:53:30,994][benchmark][INFO] - Configuring inference benchmark -[2023-08-30 16:53:30,996][benchmark][INFO] - + Setting seed(42) -[2023-08-30 16:53:31,436][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-30 16:53:31,436][backend][INFO] - Configuring pytorch backend -[2023-08-30 16:53:31,436][backend][INFO] - + Checking initial device isolation -[2023-08-30 16:53:31,436][backend][INFO] - + Checking contineous device isolation -[2023-08-30 16:53:31,437][pytorch][INFO] - + Disabling gradients -[2023-08-30 16:53:31,437][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-30 16:53:31,844][pytorch][INFO] - + Turning on eval mode -[2023-08-30 16:53:31,845][inference][INFO] - Running inference benchmark -[2023-08-30 16:53:31,968][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-30 16:53:31,969][inference][INFO] - + Tracking forward pass peak memory -[2023-08-30 16:53:32,013][inference][INFO] - + Forward pass peak memory: 467.939328 (MB) -[2023-08-30 16:53:32,014][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-30 16:53:32,016][inference][INFO] - + Warming up the forward pass -[2023-08-30 16:53:32,053][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-30 16:53:37,099][inference][INFO] - + Forward pass latency: 3.59e-03 (s) -[2023-08-30 16:53:37,101][inference][INFO] - + Forward pass throughput: 1110.00 (samples/s) -[2023-08-30 16:53:37,101][inference][INFO] - Saving inference results -[2023-08-30 16:53:37,108][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-30_16:10:01_9219d1427bf3e868c76fd495bb469cf5e1542242/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-30_16:10:01_9219d1427bf3e868c76fd495bb469cf5e1542242/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_16:10:01_9219d1427bf3e868c76fd495bb469cf5e1542242/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: 
hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-30_16:10:01_9219d1427bf3e868c76fd495bb469cf5e1542242/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-30_16:10:01_9219d1427bf3e868c76fd495bb469cf5e1542242/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 3054dec6d6a50b2eb33784adc1ab293d32e0ddad..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_16:10:01_9219d1427bf3e868c76fd495bb469cf5e1542242/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-30_16:10:01_9219d1427bf3e868c76fd495bb469cf5e1542242/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-30_16:10:01_9219d1427bf3e868c76fd495bb469cf5e1542242/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-30_16:10:01_9219d1427bf3e868c76fd495bb469cf5e1542242/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_16:10:01_9219d1427bf3e868c76fd495bb469cf5e1542242/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-30_16:10:01_9219d1427bf3e868c76fd495bb469cf5e1542242/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-30_16:10:01_9219d1427bf3e868c76fd495bb469cf5e1542242/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_16:10:01_9219d1427bf3e868c76fd495bb469cf5e1542242/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-30_16:10:01_9219d1427bf3e868c76fd495bb469cf5e1542242/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-30_16:10:01_9219d1427bf3e868c76fd495bb469cf5e1542242/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 8816668d41f76c98ebe88b3e3f6a7fe2ae894db4..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_16:10:01_9219d1427bf3e868c76fd495bb469cf5e1542242/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.16403199999996,0.00335,299.0,0.485,206.0 diff --git a/raw_results/2023-08-30_16:10:01_9219d1427bf3e868c76fd495bb469cf5e1542242/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-30_16:10:01_9219d1427bf3e868c76fd495bb469cf5e1542242/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index c5565c73bfac96a8afe5c1202fd797853d10b4de..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_16:10:01_9219d1427bf3e868c76fd495bb469cf5e1542242/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-30 16:53:41,935][benchmark][INFO] - Configuring inference benchmark -[2023-08-30 16:53:41,935][benchmark][INFO] - + Setting seed(42) -[2023-08-30 16:53:43,379][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-30 16:53:43,380][backend][INFO] - Configuring pytorch backend -[2023-08-30 16:53:43,380][backend][INFO] - + Checking initial device isolation -[2023-08-30 16:53:43,380][backend][INFO] - + Checking contineous device isolation -[2023-08-30 16:53:43,380][pytorch][INFO] - + Disabling gradients -[2023-08-30 16:53:43,380][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-30 16:53:44,025][pytorch][INFO] - + Turning on eval mode -[2023-08-30 16:53:44,025][inference][INFO] - Running inference benchmark -[2023-08-30 16:53:44,222][inference][INFO] - + Tracking forward pass peak memory -[2023-08-30 16:53:44,272][inference][INFO] - + Forward pass peak memory: 469.16403199999996 (MB) -[2023-08-30 16:53:44,274][inference][INFO] - + Warming up the 
forward pass -[2023-08-30 16:53:44,313][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-30 16:53:49,360][inference][INFO] - + Forward pass latency: 3.35e-03 (s) -[2023-08-30 16:53:49,361][inference][INFO] - + Forward pass throughput: 299.00 (samples/s) -[2023-08-30 16:53:49,362][inference][INFO] - + Warming up the generation pass -[2023-08-30 16:53:49,856][inference][INFO] - + Tracking generation latency and throughput -[2023-08-30 16:53:55,197][inference][INFO] - + Generation pass latency: 4.85e-01 (s) -[2023-08-30 16:53:55,197][inference][INFO] - + Generation pass throughput: 206.00 (tokens/s) -[2023-08-30 16:53:55,198][inference][INFO] - Saving inference results -[2023-08-30 16:53:55,211][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-30_16:15:07_1c6f072db0c17c7d82bb0d3b7529d57ebc9a0f2f/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-30_16:15:07_1c6f072db0c17c7d82bb0d3b7529d57ebc9a0f2f/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_16:15:07_1c6f072db0c17c7d82bb0d3b7529d57ebc9a0f2f/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-30_16:15:07_1c6f072db0c17c7d82bb0d3b7529d57ebc9a0f2f/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-30_16:15:07_1c6f072db0c17c7d82bb0d3b7529d57ebc9a0f2f/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 2b4280e5253f429f7d7bd7b8a88139ebfaec5e24..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_16:15:07_1c6f072db0c17c7d82bb0d3b7529d57ebc9a0f2f/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-30_16:15:07_1c6f072db0c17c7d82bb0d3b7529d57ebc9a0f2f/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-30_16:15:07_1c6f072db0c17c7d82bb0d3b7529d57ebc9a0f2f/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-30_16:15:07_1c6f072db0c17c7d82bb0d3b7529d57ebc9a0f2f/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_16:15:07_1c6f072db0c17c7d82bb0d3b7529d57ebc9a0f2f/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-30_16:15:07_1c6f072db0c17c7d82bb0d3b7529d57ebc9a0f2f/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-30_16:15:07_1c6f072db0c17c7d82bb0d3b7529d57ebc9a0f2f/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_16:15:07_1c6f072db0c17c7d82bb0d3b7529d57ebc9a0f2f/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-30_16:15:07_1c6f072db0c17c7d82bb0d3b7529d57ebc9a0f2f/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-30_16:15:07_1c6f072db0c17c7d82bb0d3b7529d57ebc9a0f2f/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 0cfba03973a14db3437694c4f407e5a4f470f8fa..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_16:15:07_1c6f072db0c17c7d82bb0d3b7529d57ebc9a0f2f/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.755584,0.00365,274.0 diff --git a/raw_results/2023-08-30_16:15:07_1c6f072db0c17c7d82bb0d3b7529d57ebc9a0f2f/pytorch_bert_inference/0/main.log b/raw_results/2023-08-30_16:15:07_1c6f072db0c17c7d82bb0d3b7529d57ebc9a0f2f/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
1ee24d37f46f8d835075e568d1c204182380c301..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_16:15:07_1c6f072db0c17c7d82bb0d3b7529d57ebc9a0f2f/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-30 16:55:04,661][benchmark][INFO] - Configuring inference benchmark -[2023-08-30 16:55:04,662][benchmark][INFO] - + Setting seed(42) -[2023-08-30 16:55:05,876][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-30 16:55:05,877][backend][INFO] - Configuring pytorch backend -[2023-08-30 16:55:05,877][backend][INFO] - + Checking initial device isolation -[2023-08-30 16:55:05,877][backend][INFO] - + Checking contineous device isolation -[2023-08-30 16:55:05,877][pytorch][INFO] - + Disabling gradients -[2023-08-30 16:55:05,877][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-30 16:55:06,481][pytorch][INFO] - + Turning on eval mode -[2023-08-30 16:55:06,481][inference][INFO] - Running inference benchmark -[2023-08-30 16:55:06,601][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-30 16:55:06,602][inference][INFO] - + Tracking forward pass peak memory -[2023-08-30 16:55:06,662][inference][INFO] - + Forward pass peak memory: 466.755584 (MB) -[2023-08-30 16:55:06,663][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-30 16:55:06,666][inference][INFO] - + Warming up the forward pass -[2023-08-30 16:55:06,707][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-30 16:55:11,756][inference][INFO] - + Forward pass latency: 3.65e-03 (s) -[2023-08-30 16:55:11,758][inference][INFO] - + Forward pass throughput: 274.00 (samples/s) -[2023-08-30 16:55:11,758][inference][INFO] - Saving inference results -[2023-08-30 16:55:11,772][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-30_16:15:07_1c6f072db0c17c7d82bb0d3b7529d57ebc9a0f2f/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-30_16:15:07_1c6f072db0c17c7d82bb0d3b7529d57ebc9a0f2f/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_16:15:07_1c6f072db0c17c7d82bb0d3b7529d57ebc9a0f2f/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-30_16:15:07_1c6f072db0c17c7d82bb0d3b7529d57ebc9a0f2f/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-30_16:15:07_1c6f072db0c17c7d82bb0d3b7529d57ebc9a0f2f/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index f14181e4ed358d6b4a0929dc94497de57aa27026..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_16:15:07_1c6f072db0c17c7d82bb0d3b7529d57ebc9a0f2f/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-30_16:15:07_1c6f072db0c17c7d82bb0d3b7529d57ebc9a0f2f/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-30_16:15:07_1c6f072db0c17c7d82bb0d3b7529d57ebc9a0f2f/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-30_16:15:07_1c6f072db0c17c7d82bb0d3b7529d57ebc9a0f2f/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_16:15:07_1c6f072db0c17c7d82bb0d3b7529d57ebc9a0f2f/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-30_16:15:07_1c6f072db0c17c7d82bb0d3b7529d57ebc9a0f2f/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-30_16:15:07_1c6f072db0c17c7d82bb0d3b7529d57ebc9a0f2f/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_16:15:07_1c6f072db0c17c7d82bb0d3b7529d57ebc9a0f2f/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-30_16:15:07_1c6f072db0c17c7d82bb0d3b7529d57ebc9a0f2f/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-30_16:15:07_1c6f072db0c17c7d82bb0d3b7529d57ebc9a0f2f/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 4aaec99eaa9c7e490efca3503f1ce8f1b15001d6..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_16:15:07_1c6f072db0c17c7d82bb0d3b7529d57ebc9a0f2f/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.714048,0.00358,1120.0 diff --git a/raw_results/2023-08-30_16:15:07_1c6f072db0c17c7d82bb0d3b7529d57ebc9a0f2f/pytorch_bert_inference/1/main.log b/raw_results/2023-08-30_16:15:07_1c6f072db0c17c7d82bb0d3b7529d57ebc9a0f2f/pytorch_bert_inference/1/main.log deleted file mode 100644 index 37997f2632bc6813c47d405db85bbeaf0992e057..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_16:15:07_1c6f072db0c17c7d82bb0d3b7529d57ebc9a0f2f/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-30 16:55:12,168][benchmark][INFO] - Configuring inference benchmark -[2023-08-30 16:55:12,169][benchmark][INFO] - + Setting seed(42) -[2023-08-30 16:55:13,007][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-30 16:55:13,007][backend][INFO] - Configuring pytorch backend -[2023-08-30 16:55:13,007][backend][INFO] - + Checking initial device isolation -[2023-08-30 16:55:13,007][backend][INFO] - + Checking contineous device isolation -[2023-08-30 16:55:13,008][pytorch][INFO] - + Disabling gradients -[2023-08-30 16:55:13,008][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-30 16:55:13,127][pytorch][INFO] - + Turning on eval mode -[2023-08-30 16:55:13,127][inference][INFO] - Running inference benchmark -[2023-08-30 16:55:13,248][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-30 16:55:13,250][inference][INFO] - + Tracking forward pass 
peak memory -[2023-08-30 16:55:13,291][inference][INFO] - + Forward pass peak memory: 467.714048 (MB) -[2023-08-30 16:55:13,291][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-30 16:55:13,293][inference][INFO] - + Warming up the forward pass -[2023-08-30 16:55:13,337][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-30 16:55:18,386][inference][INFO] - + Forward pass latency: 3.58e-03 (s) -[2023-08-30 16:55:18,387][inference][INFO] - + Forward pass throughput: 1120.00 (samples/s) -[2023-08-30 16:55:18,387][inference][INFO] - Saving inference results -[2023-08-30 16:55:18,395][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-30_16:15:07_1c6f072db0c17c7d82bb0d3b7529d57ebc9a0f2f/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-30_16:15:07_1c6f072db0c17c7d82bb0d3b7529d57ebc9a0f2f/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_16:15:07_1c6f072db0c17c7d82bb0d3b7529d57ebc9a0f2f/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-30_16:15:07_1c6f072db0c17c7d82bb0d3b7529d57ebc9a0f2f/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-30_16:15:07_1c6f072db0c17c7d82bb0d3b7529d57ebc9a0f2f/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index d4c7752b1d7252bb9ff32d42ae9000d49f0343bc..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_16:15:07_1c6f072db0c17c7d82bb0d3b7529d57ebc9a0f2f/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-30_16:15:07_1c6f072db0c17c7d82bb0d3b7529d57ebc9a0f2f/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-30_16:15:07_1c6f072db0c17c7d82bb0d3b7529d57ebc9a0f2f/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-30_16:15:07_1c6f072db0c17c7d82bb0d3b7529d57ebc9a0f2f/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_16:15:07_1c6f072db0c17c7d82bb0d3b7529d57ebc9a0f2f/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-30_16:15:07_1c6f072db0c17c7d82bb0d3b7529d57ebc9a0f2f/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-30_16:15:07_1c6f072db0c17c7d82bb0d3b7529d57ebc9a0f2f/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_16:15:07_1c6f072db0c17c7d82bb0d3b7529d57ebc9a0f2f/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-30_16:15:07_1c6f072db0c17c7d82bb0d3b7529d57ebc9a0f2f/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-30_16:15:07_1c6f072db0c17c7d82bb0d3b7529d57ebc9a0f2f/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index fb0902ee98d7fc5e9410254332223dac5201b95d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_16:15:07_1c6f072db0c17c7d82bb0d3b7529d57ebc9a0f2f/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.520384,0.0038,263.0,0.492,203.0 diff --git a/raw_results/2023-08-30_16:15:07_1c6f072db0c17c7d82bb0d3b7529d57ebc9a0f2f/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-30_16:15:07_1c6f072db0c17c7d82bb0d3b7529d57ebc9a0f2f/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 2b7164c0c1f2df2e2b7c2f25098d5bdddf34909b..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-30_16:15:07_1c6f072db0c17c7d82bb0d3b7529d57ebc9a0f2f/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-30 16:55:23,253][benchmark][INFO] - Configuring inference benchmark -[2023-08-30 16:55:23,254][benchmark][INFO] - + Setting seed(42) -[2023-08-30 16:55:24,800][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-30 16:55:24,801][backend][INFO] - Configuring pytorch backend -[2023-08-30 16:55:24,801][backend][INFO] - + Checking initial device isolation -[2023-08-30 16:55:24,801][backend][INFO] - + Checking contineous device isolation -[2023-08-30 16:55:24,801][pytorch][INFO] - + Disabling gradients -[2023-08-30 16:55:24,801][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-30 16:55:25,422][pytorch][INFO] - + Turning on eval mode -[2023-08-30 16:55:25,423][inference][INFO] - Running inference benchmark -[2023-08-30 16:55:25,614][inference][INFO] - + Tracking forward pass peak memory -[2023-08-30 16:55:25,660][inference][INFO] - + Forward pass peak memory: 469.520384 (MB) -[2023-08-30 16:55:25,661][inference][INFO] - + Warming up the forward pass -[2023-08-30 16:55:25,702][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-30 16:55:30,749][inference][INFO] - + Forward pass latency: 3.80e-03 (s) -[2023-08-30 16:55:30,751][inference][INFO] - + Forward pass throughput: 263.00 (samples/s) -[2023-08-30 16:55:30,752][inference][INFO] - + Warming up the generation pass -[2023-08-30 16:55:31,261][inference][INFO] - + Tracking generation latency and throughput -[2023-08-30 16:55:36,679][inference][INFO] - + Generation pass latency: 4.92e-01 (s) -[2023-08-30 16:55:36,680][inference][INFO] - + Generation pass throughput: 203.00 (tokens/s) -[2023-08-30 16:55:36,680][inference][INFO] - Saving inference results -[2023-08-30 16:55:36,692][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-30_20:49:03_716bb2e3910fd4872064c55b0d8bc3dad754d129/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-30_20:49:03_716bb2e3910fd4872064c55b0d8bc3dad754d129/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_20:49:03_716bb2e3910fd4872064c55b0d8bc3dad754d129/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-30_20:49:03_716bb2e3910fd4872064c55b0d8bc3dad754d129/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-30_20:49:03_716bb2e3910fd4872064c55b0d8bc3dad754d129/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 6bc21851d7b064d9b325e6fdb377f5fa43d03ba2..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_20:49:03_716bb2e3910fd4872064c55b0d8bc3dad754d129/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-30_20:49:03_716bb2e3910fd4872064c55b0d8bc3dad754d129/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-30_20:49:03_716bb2e3910fd4872064c55b0d8bc3dad754d129/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-30_20:49:03_716bb2e3910fd4872064c55b0d8bc3dad754d129/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_20:49:03_716bb2e3910fd4872064c55b0d8bc3dad754d129/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-30_20:49:03_716bb2e3910fd4872064c55b0d8bc3dad754d129/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-30_20:49:03_716bb2e3910fd4872064c55b0d8bc3dad754d129/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_20:49:03_716bb2e3910fd4872064c55b0d8bc3dad754d129/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-30_20:49:03_716bb2e3910fd4872064c55b0d8bc3dad754d129/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-30_20:49:03_716bb2e3910fd4872064c55b0d8bc3dad754d129/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index ebd2264190b32c03fecaa270beaba611f925cb5e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_20:49:03_716bb2e3910fd4872064c55b0d8bc3dad754d129/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.31731199999996,0.00506,198.0 diff --git a/raw_results/2023-08-30_20:49:03_716bb2e3910fd4872064c55b0d8bc3dad754d129/pytorch_bert_inference/0/main.log b/raw_results/2023-08-30_20:49:03_716bb2e3910fd4872064c55b0d8bc3dad754d129/pytorch_bert_inference/0/main.log deleted file mode 100644 index 7fa953bc19c6de640b62b23e7dfcc26a757fc0d2..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_20:49:03_716bb2e3910fd4872064c55b0d8bc3dad754d129/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-30 22:50:12,941][benchmark][INFO] - Configuring inference benchmark -[2023-08-30 22:50:12,943][benchmark][INFO] - + Setting seed(42) -[2023-08-30 22:50:14,245][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-30 22:50:14,246][backend][INFO] - Configuring pytorch backend -[2023-08-30 22:50:14,246][backend][INFO] - + Checking initial device isolation -[2023-08-30 22:50:14,246][backend][INFO] - + Checking contineous device isolation -[2023-08-30 22:50:14,246][pytorch][INFO] - + Disabling gradients -[2023-08-30 22:50:14,246][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-30 22:50:14,951][pytorch][INFO] - + Turning on eval mode -[2023-08-30 22:50:14,952][inference][INFO] - Running inference benchmark -[2023-08-30 22:50:15,119][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-30 22:50:15,121][inference][INFO] - + Tracking forward 
pass peak memory -[2023-08-30 22:50:15,263][inference][INFO] - + Forward pass peak memory: 466.31731199999996 (MB) -[2023-08-30 22:50:15,264][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-30 22:50:15,266][inference][INFO] - + Warming up the forward pass -[2023-08-30 22:50:15,325][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-30 22:50:20,410][inference][INFO] - + Forward pass latency: 5.06e-03 (s) -[2023-08-30 22:50:20,412][inference][INFO] - + Forward pass throughput: 198.00 (samples/s) -[2023-08-30 22:50:20,413][inference][INFO] - Saving inference results -[2023-08-30 22:50:20,427][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-30_20:49:03_716bb2e3910fd4872064c55b0d8bc3dad754d129/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-30_20:49:03_716bb2e3910fd4872064c55b0d8bc3dad754d129/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_20:49:03_716bb2e3910fd4872064c55b0d8bc3dad754d129/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-30_20:49:03_716bb2e3910fd4872064c55b0d8bc3dad754d129/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-30_20:49:03_716bb2e3910fd4872064c55b0d8bc3dad754d129/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index cf0c10f895b629f0d7d1f9a4a4330403168b3f09..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_20:49:03_716bb2e3910fd4872064c55b0d8bc3dad754d129/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} 
- launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-30_20:49:03_716bb2e3910fd4872064c55b0d8bc3dad754d129/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-30_20:49:03_716bb2e3910fd4872064c55b0d8bc3dad754d129/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-30_20:49:03_716bb2e3910fd4872064c55b0d8bc3dad754d129/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_20:49:03_716bb2e3910fd4872064c55b0d8bc3dad754d129/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-30_20:49:03_716bb2e3910fd4872064c55b0d8bc3dad754d129/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-30_20:49:03_716bb2e3910fd4872064c55b0d8bc3dad754d129/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_20:49:03_716bb2e3910fd4872064c55b0d8bc3dad754d129/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-30_20:49:03_716bb2e3910fd4872064c55b0d8bc3dad754d129/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-30_20:49:03_716bb2e3910fd4872064c55b0d8bc3dad754d129/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index f0af1f92bcbcfc9cb90b80e140371ee2e51c2d3a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_20:49:03_716bb2e3910fd4872064c55b0d8bc3dad754d129/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.18976,0.0149,268.0 diff --git a/raw_results/2023-08-30_20:49:03_716bb2e3910fd4872064c55b0d8bc3dad754d129/pytorch_bert_inference/1/main.log b/raw_results/2023-08-30_20:49:03_716bb2e3910fd4872064c55b0d8bc3dad754d129/pytorch_bert_inference/1/main.log deleted file mode 100644 index 
996985ef87b4d092e9a38b193a2abd3c9ffb499e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_20:49:03_716bb2e3910fd4872064c55b0d8bc3dad754d129/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-30 22:50:20,912][benchmark][INFO] - Configuring inference benchmark -[2023-08-30 22:50:20,913][benchmark][INFO] - + Setting seed(42) -[2023-08-30 22:50:21,378][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-30 22:50:21,379][backend][INFO] - Configuring pytorch backend -[2023-08-30 22:50:21,379][backend][INFO] - + Checking initial device isolation -[2023-08-30 22:50:21,379][backend][INFO] - + Checking contineous device isolation -[2023-08-30 22:50:21,379][pytorch][INFO] - + Disabling gradients -[2023-08-30 22:50:21,380][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-30 22:50:21,510][pytorch][INFO] - + Turning on eval mode -[2023-08-30 22:50:21,512][inference][INFO] - Running inference benchmark -[2023-08-30 22:50:21,638][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-30 22:50:21,640][inference][INFO] - + Tracking forward pass peak memory -[2023-08-30 22:50:21,831][inference][INFO] - + Forward pass peak memory: 467.18976 (MB) -[2023-08-30 22:50:21,833][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-30 22:50:21,836][inference][INFO] - + Warming up the forward pass -[2023-08-30 22:50:21,935][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-30 22:50:26,975][inference][INFO] - + Forward pass latency: 1.49e-02 (s) -[2023-08-30 22:50:26,976][inference][INFO] - + Forward pass throughput: 268.00 (samples/s) -[2023-08-30 22:50:26,977][inference][INFO] - Saving inference results -[2023-08-30 22:50:26,985][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-30_20:49:03_716bb2e3910fd4872064c55b0d8bc3dad754d129/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-30_20:49:03_716bb2e3910fd4872064c55b0d8bc3dad754d129/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_20:49:03_716bb2e3910fd4872064c55b0d8bc3dad754d129/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-30_20:49:03_716bb2e3910fd4872064c55b0d8bc3dad754d129/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-30_20:49:03_716bb2e3910fd4872064c55b0d8bc3dad754d129/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 693a874ee472afe015a23dc4acd24df43c3d7714..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_20:49:03_716bb2e3910fd4872064c55b0d8bc3dad754d129/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-30_20:49:03_716bb2e3910fd4872064c55b0d8bc3dad754d129/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-30_20:49:03_716bb2e3910fd4872064c55b0d8bc3dad754d129/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-30_20:49:03_716bb2e3910fd4872064c55b0d8bc3dad754d129/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_20:49:03_716bb2e3910fd4872064c55b0d8bc3dad754d129/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-30_20:49:03_716bb2e3910fd4872064c55b0d8bc3dad754d129/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-30_20:49:03_716bb2e3910fd4872064c55b0d8bc3dad754d129/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_20:49:03_716bb2e3910fd4872064c55b0d8bc3dad754d129/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-30_20:49:03_716bb2e3910fd4872064c55b0d8bc3dad754d129/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-30_20:49:03_716bb2e3910fd4872064c55b0d8bc3dad754d129/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 1657baee36ada93614a5b7d9a4695927da2df0e6..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_20:49:03_716bb2e3910fd4872064c55b0d8bc3dad754d129/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.15174399999995,0.00508,197.0,11.2,8.93 diff --git a/raw_results/2023-08-30_20:49:03_716bb2e3910fd4872064c55b0d8bc3dad754d129/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-30_20:49:03_716bb2e3910fd4872064c55b0d8bc3dad754d129/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index b9b81b74d1a1c42a58119f0fc67386a7293ee42c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-30_20:49:03_716bb2e3910fd4872064c55b0d8bc3dad754d129/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-30 22:50:36,165][benchmark][INFO] - Configuring inference benchmark -[2023-08-30 22:50:36,166][benchmark][INFO] - + Setting seed(42) -[2023-08-30 22:50:37,623][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-30 22:50:37,624][backend][INFO] - Configuring pytorch backend -[2023-08-30 22:50:37,624][backend][INFO] - + Checking initial device isolation -[2023-08-30 22:50:37,625][backend][INFO] - + Checking contineous device isolation -[2023-08-30 22:50:37,625][pytorch][INFO] - + Disabling gradients -[2023-08-30 22:50:37,625][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-30 22:50:38,307][pytorch][INFO] - + Turning on eval mode -[2023-08-30 22:50:38,307][inference][INFO] - Running inference benchmark -[2023-08-30 22:50:38,510][inference][INFO] - + Tracking forward pass peak memory -[2023-08-30 22:50:38,575][inference][INFO] - + Forward pass peak memory: 469.15174399999995 (MB) -[2023-08-30 22:50:38,577][inference][INFO] - + Warming up the 
forward pass -[2023-08-30 22:50:38,631][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-30 22:50:43,725][inference][INFO] - + Forward pass latency: 5.08e-03 (s) -[2023-08-30 22:50:43,728][inference][INFO] - + Forward pass throughput: 197.00 (samples/s) -[2023-08-30 22:50:43,729][inference][INFO] - + Warming up the generation pass -[2023-08-30 22:50:52,861][inference][INFO] - + Tracking generation latency and throughput -[2023-08-30 22:51:04,087][inference][INFO] - + Generation pass latency: 1.12e+01 (s) -[2023-08-30 22:51:04,088][inference][INFO] - + Generation pass throughput: 8.93 (tokens/s) -[2023-08-30 22:51:04,088][inference][INFO] - Saving inference results -[2023-08-30 22:51:04,103][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-31_09:31:16_f8468b4facb2e46a1766a256b9fe47b0865d6854/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-31_09:31:16_f8468b4facb2e46a1766a256b9fe47b0865d6854/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_09:31:16_f8468b4facb2e46a1766a256b9fe47b0865d6854/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-31_09:31:16_f8468b4facb2e46a1766a256b9fe47b0865d6854/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-31_09:31:16_f8468b4facb2e46a1766a256b9fe47b0865d6854/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 6ea87364948bfc6ed791beaf8d35774c5a0f0762..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_09:31:16_f8468b4facb2e46a1766a256b9fe47b0865d6854/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-31_09:31:16_f8468b4facb2e46a1766a256b9fe47b0865d6854/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-31_09:31:16_f8468b4facb2e46a1766a256b9fe47b0865d6854/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-31_09:31:16_f8468b4facb2e46a1766a256b9fe47b0865d6854/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_09:31:16_f8468b4facb2e46a1766a256b9fe47b0865d6854/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-31_09:31:16_f8468b4facb2e46a1766a256b9fe47b0865d6854/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-31_09:31:16_f8468b4facb2e46a1766a256b9fe47b0865d6854/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_09:31:16_f8468b4facb2e46a1766a256b9fe47b0865d6854/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-31_09:31:16_f8468b4facb2e46a1766a256b9fe47b0865d6854/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-31_09:31:16_f8468b4facb2e46a1766a256b9fe47b0865d6854/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 9e5ff9099302b322976117144185f868a567ab4a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_09:31:16_f8468b4facb2e46a1766a256b9fe47b0865d6854/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.62393599999996,0.00364,275.0 diff --git a/raw_results/2023-08-31_09:31:16_f8468b4facb2e46a1766a256b9fe47b0865d6854/pytorch_bert_inference/0/main.log b/raw_results/2023-08-31_09:31:16_f8468b4facb2e46a1766a256b9fe47b0865d6854/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
efb5dd2828bbd15823fc69c2871190ab53bb0b73..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_09:31:16_f8468b4facb2e46a1766a256b9fe47b0865d6854/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-31 10:50:03,598][benchmark][INFO] - Configuring inference benchmark -[2023-08-31 10:50:03,598][benchmark][INFO] - + Setting seed(42) -[2023-08-31 10:50:05,239][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-31 10:50:05,239][backend][INFO] - Configuring pytorch backend -[2023-08-31 10:50:05,240][backend][INFO] - + Checking initial device isolation -[2023-08-31 10:50:05,240][backend][INFO] - + Checking contineous device isolation -[2023-08-31 10:50:05,240][pytorch][INFO] - + Disabling gradients -[2023-08-31 10:50:05,240][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-31 10:50:05,973][pytorch][INFO] - + Turning on eval mode -[2023-08-31 10:50:05,973][inference][INFO] - Running inference benchmark -[2023-08-31 10:50:06,092][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-31 10:50:06,093][inference][INFO] - + Tracking forward pass peak memory -[2023-08-31 10:50:06,154][inference][INFO] - + Forward pass peak memory: 467.62393599999996 (MB) -[2023-08-31 10:50:06,155][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-31 10:50:06,157][inference][INFO] - + Warming up the forward pass -[2023-08-31 10:50:06,194][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-31 10:50:11,243][inference][INFO] - + Forward pass latency: 3.64e-03 (s) -[2023-08-31 10:50:11,245][inference][INFO] - + Forward pass throughput: 275.00 (samples/s) -[2023-08-31 10:50:11,245][inference][INFO] - Saving inference results -[2023-08-31 10:50:11,255][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-31_09:31:16_f8468b4facb2e46a1766a256b9fe47b0865d6854/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-31_09:31:16_f8468b4facb2e46a1766a256b9fe47b0865d6854/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_09:31:16_f8468b4facb2e46a1766a256b9fe47b0865d6854/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-31_09:31:16_f8468b4facb2e46a1766a256b9fe47b0865d6854/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-31_09:31:16_f8468b4facb2e46a1766a256b9fe47b0865d6854/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 421dbe146b3901ab0a6736d4e48a9b18af049203..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_09:31:16_f8468b4facb2e46a1766a256b9fe47b0865d6854/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-31_09:31:16_f8468b4facb2e46a1766a256b9fe47b0865d6854/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-31_09:31:16_f8468b4facb2e46a1766a256b9fe47b0865d6854/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-31_09:31:16_f8468b4facb2e46a1766a256b9fe47b0865d6854/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_09:31:16_f8468b4facb2e46a1766a256b9fe47b0865d6854/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-31_09:31:16_f8468b4facb2e46a1766a256b9fe47b0865d6854/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-31_09:31:16_f8468b4facb2e46a1766a256b9fe47b0865d6854/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_09:31:16_f8468b4facb2e46a1766a256b9fe47b0865d6854/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-31_09:31:16_f8468b4facb2e46a1766a256b9fe47b0865d6854/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-31_09:31:16_f8468b4facb2e46a1766a256b9fe47b0865d6854/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index da9d9ee9c2097e034a2a85b23f6c96e473e501cc..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_09:31:16_f8468b4facb2e46a1766a256b9fe47b0865d6854/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.615168,0.00373,1070.0 diff --git a/raw_results/2023-08-31_09:31:16_f8468b4facb2e46a1766a256b9fe47b0865d6854/pytorch_bert_inference/1/main.log b/raw_results/2023-08-31_09:31:16_f8468b4facb2e46a1766a256b9fe47b0865d6854/pytorch_bert_inference/1/main.log deleted file mode 100644 index 81def3d51669bc9ee2c214762de1c1284854c538..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_09:31:16_f8468b4facb2e46a1766a256b9fe47b0865d6854/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-31 10:50:11,624][benchmark][INFO] - Configuring inference benchmark -[2023-08-31 10:50:11,625][benchmark][INFO] - + Setting seed(42) -[2023-08-31 10:50:12,093][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-31 10:50:12,093][backend][INFO] - Configuring pytorch backend -[2023-08-31 10:50:12,093][backend][INFO] - + Checking initial device isolation -[2023-08-31 10:50:12,093][backend][INFO] - + Checking contineous device isolation -[2023-08-31 10:50:12,093][pytorch][INFO] - + Disabling gradients -[2023-08-31 10:50:12,094][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-31 10:50:12,209][pytorch][INFO] - + Turning on eval mode -[2023-08-31 10:50:12,210][inference][INFO] - Running inference benchmark -[2023-08-31 10:50:12,328][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-31 10:50:12,329][inference][INFO] - + Tracking forward pass 
peak memory -[2023-08-31 10:50:12,369][inference][INFO] - + Forward pass peak memory: 468.615168 (MB) -[2023-08-31 10:50:12,370][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-31 10:50:12,372][inference][INFO] - + Warming up the forward pass -[2023-08-31 10:50:12,413][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-31 10:50:17,459][inference][INFO] - + Forward pass latency: 3.73e-03 (s) -[2023-08-31 10:50:17,460][inference][INFO] - + Forward pass throughput: 1070.00 (samples/s) -[2023-08-31 10:50:17,460][inference][INFO] - Saving inference results -[2023-08-31 10:50:17,467][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-31_09:31:16_f8468b4facb2e46a1766a256b9fe47b0865d6854/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-31_09:31:16_f8468b4facb2e46a1766a256b9fe47b0865d6854/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_09:31:16_f8468b4facb2e46a1766a256b9fe47b0865d6854/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-31_09:31:16_f8468b4facb2e46a1766a256b9fe47b0865d6854/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-31_09:31:16_f8468b4facb2e46a1766a256b9fe47b0865d6854/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 15d8ed496bd6d3d3d6eba8ed9db44aa882740f1f..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_09:31:16_f8468b4facb2e46a1766a256b9fe47b0865d6854/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-31_09:31:16_f8468b4facb2e46a1766a256b9fe47b0865d6854/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-31_09:31:16_f8468b4facb2e46a1766a256b9fe47b0865d6854/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-31_09:31:16_f8468b4facb2e46a1766a256b9fe47b0865d6854/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_09:31:16_f8468b4facb2e46a1766a256b9fe47b0865d6854/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-31_09:31:16_f8468b4facb2e46a1766a256b9fe47b0865d6854/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-31_09:31:16_f8468b4facb2e46a1766a256b9fe47b0865d6854/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_09:31:16_f8468b4facb2e46a1766a256b9fe47b0865d6854/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-31_09:31:16_f8468b4facb2e46a1766a256b9fe47b0865d6854/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-31_09:31:16_f8468b4facb2e46a1766a256b9fe47b0865d6854/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 23105e739dc4019bbdd853f19f467d50878f30a0..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_09:31:16_f8468b4facb2e46a1766a256b9fe47b0865d6854/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.356544,0.0033,303.0,0.506,198.0 diff --git a/raw_results/2023-08-31_09:31:16_f8468b4facb2e46a1766a256b9fe47b0865d6854/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-31_09:31:16_f8468b4facb2e46a1766a256b9fe47b0865d6854/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 8d293c8d57ebb1751725c90f80aef61b6326737f..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-31_09:31:16_f8468b4facb2e46a1766a256b9fe47b0865d6854/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-31 10:50:22,559][benchmark][INFO] - Configuring inference benchmark -[2023-08-31 10:50:22,560][benchmark][INFO] - + Setting seed(42) -[2023-08-31 10:50:23,975][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-31 10:50:23,975][backend][INFO] - Configuring pytorch backend -[2023-08-31 10:50:23,975][backend][INFO] - + Checking initial device isolation -[2023-08-31 10:50:23,975][backend][INFO] - + Checking contineous device isolation -[2023-08-31 10:50:23,976][pytorch][INFO] - + Disabling gradients -[2023-08-31 10:50:23,976][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-31 10:50:24,608][pytorch][INFO] - + Turning on eval mode -[2023-08-31 10:50:24,608][inference][INFO] - Running inference benchmark -[2023-08-31 10:50:24,799][inference][INFO] - + Tracking forward pass peak memory -[2023-08-31 10:50:24,845][inference][INFO] - + Forward pass peak memory: 469.356544 (MB) -[2023-08-31 10:50:24,847][inference][INFO] - + Warming up the forward pass -[2023-08-31 10:50:24,899][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-31 10:50:29,950][inference][INFO] - + Forward pass latency: 3.30e-03 (s) -[2023-08-31 10:50:29,952][inference][INFO] - + Forward pass throughput: 303.00 (samples/s) -[2023-08-31 10:50:29,952][inference][INFO] - + Warming up the generation pass -[2023-08-31 10:50:30,444][inference][INFO] - + Tracking generation latency and throughput -[2023-08-31 10:50:35,506][inference][INFO] - + Generation pass latency: 5.06e-01 (s) -[2023-08-31 10:50:35,507][inference][INFO] - + Generation pass throughput: 198.00 (tokens/s) -[2023-08-31 10:50:35,508][inference][INFO] - Saving inference results -[2023-08-31 10:50:35,518][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-31_09:47:53_e95bcaeef0bd6b084b7615faae411a14d50bcfee/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-31_09:47:53_e95bcaeef0bd6b084b7615faae411a14d50bcfee/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_09:47:53_e95bcaeef0bd6b084b7615faae411a14d50bcfee/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-31_09:47:53_e95bcaeef0bd6b084b7615faae411a14d50bcfee/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-31_09:47:53_e95bcaeef0bd6b084b7615faae411a14d50bcfee/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index c6b650cf6f9a51840119a08160df543070505dcf..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_09:47:53_e95bcaeef0bd6b084b7615faae411a14d50bcfee/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-31_09:47:53_e95bcaeef0bd6b084b7615faae411a14d50bcfee/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-31_09:47:53_e95bcaeef0bd6b084b7615faae411a14d50bcfee/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-31_09:47:53_e95bcaeef0bd6b084b7615faae411a14d50bcfee/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_09:47:53_e95bcaeef0bd6b084b7615faae411a14d50bcfee/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-31_09:47:53_e95bcaeef0bd6b084b7615faae411a14d50bcfee/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-31_09:47:53_e95bcaeef0bd6b084b7615faae411a14d50bcfee/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_09:47:53_e95bcaeef0bd6b084b7615faae411a14d50bcfee/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-31_09:47:53_e95bcaeef0bd6b084b7615faae411a14d50bcfee/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-31_09:47:53_e95bcaeef0bd6b084b7615faae411a14d50bcfee/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 6c76787ea30fefa22d4da91893931fb9b74448bc..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_09:47:53_e95bcaeef0bd6b084b7615faae411a14d50bcfee/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.06278399999997,0.00392,255.0 diff --git a/raw_results/2023-08-31_09:47:53_e95bcaeef0bd6b084b7615faae411a14d50bcfee/pytorch_bert_inference/0/main.log b/raw_results/2023-08-31_09:47:53_e95bcaeef0bd6b084b7615faae411a14d50bcfee/pytorch_bert_inference/0/main.log deleted file mode 100644 index 3c3a640ee7814eb58f5b377bc01936589cf1d372..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_09:47:53_e95bcaeef0bd6b084b7615faae411a14d50bcfee/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-31 10:51:44,825][benchmark][INFO] - Configuring inference benchmark -[2023-08-31 10:51:44,826][benchmark][INFO] - + Setting seed(42) -[2023-08-31 10:51:46,067][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-31 10:51:46,067][backend][INFO] - Configuring pytorch backend -[2023-08-31 10:51:46,067][backend][INFO] - + Checking initial device isolation -[2023-08-31 10:51:46,067][backend][INFO] - + Checking contineous device isolation -[2023-08-31 10:51:46,067][pytorch][INFO] - + Disabling gradients -[2023-08-31 10:51:46,068][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-31 10:51:46,686][pytorch][INFO] - + Turning on eval mode -[2023-08-31 10:51:46,687][inference][INFO] - Running inference benchmark -[2023-08-31 10:51:46,815][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-31 10:51:46,816][inference][INFO] - + Tracking forward 
pass peak memory -[2023-08-31 10:51:46,877][inference][INFO] - + Forward pass peak memory: 467.06278399999997 (MB) -[2023-08-31 10:51:46,878][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-31 10:51:46,880][inference][INFO] - + Warming up the forward pass -[2023-08-31 10:51:46,914][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-31 10:51:51,958][inference][INFO] - + Forward pass latency: 3.92e-03 (s) -[2023-08-31 10:51:51,959][inference][INFO] - + Forward pass throughput: 255.00 (samples/s) -[2023-08-31 10:51:51,959][inference][INFO] - Saving inference results -[2023-08-31 10:51:51,970][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-31_09:47:53_e95bcaeef0bd6b084b7615faae411a14d50bcfee/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-31_09:47:53_e95bcaeef0bd6b084b7615faae411a14d50bcfee/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_09:47:53_e95bcaeef0bd6b084b7615faae411a14d50bcfee/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-31_09:47:53_e95bcaeef0bd6b084b7615faae411a14d50bcfee/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-31_09:47:53_e95bcaeef0bd6b084b7615faae411a14d50bcfee/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index ad93028f6c301fd9f85d01c3fb33f53c97ce8a83..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_09:47:53_e95bcaeef0bd6b084b7615faae411a14d50bcfee/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} 
- launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-31_09:47:53_e95bcaeef0bd6b084b7615faae411a14d50bcfee/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-31_09:47:53_e95bcaeef0bd6b084b7615faae411a14d50bcfee/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-31_09:47:53_e95bcaeef0bd6b084b7615faae411a14d50bcfee/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_09:47:53_e95bcaeef0bd6b084b7615faae411a14d50bcfee/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-31_09:47:53_e95bcaeef0bd6b084b7615faae411a14d50bcfee/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-31_09:47:53_e95bcaeef0bd6b084b7615faae411a14d50bcfee/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_09:47:53_e95bcaeef0bd6b084b7615faae411a14d50bcfee/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-31_09:47:53_e95bcaeef0bd6b084b7615faae411a14d50bcfee/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-31_09:47:53_e95bcaeef0bd6b084b7615faae411a14d50bcfee/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index b8f22c7f3bb7f5315578b5221aeeec4bb7740ad4..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_09:47:53_e95bcaeef0bd6b084b7615faae411a14d50bcfee/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.992576,0.00423,946.0 diff --git a/raw_results/2023-08-31_09:47:53_e95bcaeef0bd6b084b7615faae411a14d50bcfee/pytorch_bert_inference/1/main.log b/raw_results/2023-08-31_09:47:53_e95bcaeef0bd6b084b7615faae411a14d50bcfee/pytorch_bert_inference/1/main.log deleted file mode 100644 index 
d7d8f8971fa2164717905cd49330235497b9d670..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_09:47:53_e95bcaeef0bd6b084b7615faae411a14d50bcfee/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-31 10:51:52,353][benchmark][INFO] - Configuring inference benchmark -[2023-08-31 10:51:52,354][benchmark][INFO] - + Setting seed(42) -[2023-08-31 10:51:52,789][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-31 10:51:52,789][backend][INFO] - Configuring pytorch backend -[2023-08-31 10:51:52,789][backend][INFO] - + Checking initial device isolation -[2023-08-31 10:51:52,789][backend][INFO] - + Checking contineous device isolation -[2023-08-31 10:51:52,790][pytorch][INFO] - + Disabling gradients -[2023-08-31 10:51:52,790][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-31 10:51:52,910][pytorch][INFO] - + Turning on eval mode -[2023-08-31 10:51:52,910][inference][INFO] - Running inference benchmark -[2023-08-31 10:51:53,036][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-31 10:51:53,037][inference][INFO] - + Tracking forward pass peak memory -[2023-08-31 10:51:53,077][inference][INFO] - + Forward pass peak memory: 467.992576 (MB) -[2023-08-31 10:51:53,078][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-31 10:51:53,079][inference][INFO] - + Warming up the forward pass -[2023-08-31 10:51:53,121][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-31 10:51:58,162][inference][INFO] - + Forward pass latency: 4.23e-03 (s) -[2023-08-31 10:51:58,163][inference][INFO] - + Forward pass throughput: 946.00 (samples/s) -[2023-08-31 10:51:58,163][inference][INFO] - Saving inference results -[2023-08-31 10:51:58,172][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-31_09:47:53_e95bcaeef0bd6b084b7615faae411a14d50bcfee/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-31_09:47:53_e95bcaeef0bd6b084b7615faae411a14d50bcfee/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_09:47:53_e95bcaeef0bd6b084b7615faae411a14d50bcfee/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-31_09:47:53_e95bcaeef0bd6b084b7615faae411a14d50bcfee/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-31_09:47:53_e95bcaeef0bd6b084b7615faae411a14d50bcfee/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index bbee1aa3e38b07d352dd9cfe29904d0dc2ee665a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_09:47:53_e95bcaeef0bd6b084b7615faae411a14d50bcfee/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
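Note: the `hydra_logging` / `job_logging` sections that follow are ordinary `logging.config.dictConfig` payloads; the `simple` formatter is what produces the `[timestamp][name][LEVEL] - message` lines visible in the deleted `main.log` hunks. A minimal sketch with the console handler only (the colorlog and file handlers are omitted):

```python
import logging
import logging.config

# Reduced form of the job_logging block above; reproduces the
# "[2023-08-31 10:52:05,635][inference][INFO] - ..." line format of main.log.
logging.config.dictConfig({
    "version": 1,
    "formatters": {
        "simple": {"format": "[%(asctime)s][%(name)s][%(levelname)s] - %(message)s"},
    },
    "handlers": {
        "console": {
            "class": "logging.StreamHandler",
            "formatter": "simple",
            "stream": "ext://sys.stdout",
        },
    },
    "root": {"level": "INFO", "handlers": ["console"]},
})
logging.getLogger("inference").info("+ Tracking forward pass latency and throughput")
```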
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-31_09:47:53_e95bcaeef0bd6b084b7615faae411a14d50bcfee/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-31_09:47:53_e95bcaeef0bd6b084b7615faae411a14d50bcfee/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-31_09:47:53_e95bcaeef0bd6b084b7615faae411a14d50bcfee/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_09:47:53_e95bcaeef0bd6b084b7615faae411a14d50bcfee/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-31_09:47:53_e95bcaeef0bd6b084b7615faae411a14d50bcfee/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-31_09:47:53_e95bcaeef0bd6b084b7615faae411a14d50bcfee/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_09:47:53_e95bcaeef0bd6b084b7615faae411a14d50bcfee/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-31_09:47:53_e95bcaeef0bd6b084b7615faae411a14d50bcfee/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-31_09:47:53_e95bcaeef0bd6b084b7615faae411a14d50bcfee/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 80ef9812b6674e72decfa8ba87c4606dc4d97a58..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_09:47:53_e95bcaeef0bd6b084b7615faae411a14d50bcfee/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.303296,0.00384,260.0,0.497,201.0 diff --git a/raw_results/2023-08-31_09:47:53_e95bcaeef0bd6b084b7615faae411a14d50bcfee/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-31_09:47:53_e95bcaeef0bd6b084b7615faae411a14d50bcfee/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index ed23d9f83b3a2ac8e791e98240c4ae2d9680d9d1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_09:47:53_e95bcaeef0bd6b084b7615faae411a14d50bcfee/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-31 10:52:03,133][benchmark][INFO] - Configuring inference benchmark -[2023-08-31 10:52:03,134][benchmark][INFO] - + Setting seed(42) -[2023-08-31 10:52:04,650][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-31 10:52:04,651][backend][INFO] - Configuring pytorch backend -[2023-08-31 10:52:04,651][backend][INFO] - + Checking initial device isolation -[2023-08-31 10:52:04,651][backend][INFO] - + Checking contineous device isolation -[2023-08-31 10:52:04,651][pytorch][INFO] - + Disabling gradients -[2023-08-31 10:52:04,651][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-31 10:52:05,350][pytorch][INFO] - + Turning on eval mode -[2023-08-31 10:52:05,351][inference][INFO] - Running inference benchmark -[2023-08-31 10:52:05,547][inference][INFO] - + Tracking forward pass peak memory -[2023-08-31 10:52:05,592][inference][INFO] - + Forward pass peak memory: 469.303296 (MB) -[2023-08-31 10:52:05,593][inference][INFO] - + Warming up the forward pass 
-[2023-08-31 10:52:05,635][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-31 10:52:10,678][inference][INFO] - + Forward pass latency: 3.84e-03 (s) -[2023-08-31 10:52:10,680][inference][INFO] - + Forward pass throughput: 260.00 (samples/s) -[2023-08-31 10:52:10,681][inference][INFO] - + Warming up the generation pass -[2023-08-31 10:52:11,172][inference][INFO] - + Tracking generation latency and throughput -[2023-08-31 10:52:16,645][inference][INFO] - + Generation pass latency: 4.97e-01 (s) -[2023-08-31 10:52:16,646][inference][INFO] - + Generation pass throughput: 201.00 (tokens/s) -[2023-08-31 10:52:16,647][inference][INFO] - Saving inference results -[2023-08-31 10:52:16,658][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-31_09:55:10_99fc3ac8ac2d79f19e983b63c2992b78f4509111/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-31_09:55:10_99fc3ac8ac2d79f19e983b63c2992b78f4509111/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_09:55:10_99fc3ac8ac2d79f19e983b63c2992b78f4509111/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-31_09:55:10_99fc3ac8ac2d79f19e983b63c2992b78f4509111/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-31_09:55:10_99fc3ac8ac2d79f19e983b63c2992b78f4509111/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index fb20382835aae29e6f1c5b5acc6eae41733aa712..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_09:55:10_99fc3ac8ac2d79f19e983b63c2992b78f4509111/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-31_09:55:10_99fc3ac8ac2d79f19e983b63c2992b78f4509111/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-31_09:55:10_99fc3ac8ac2d79f19e983b63c2992b78f4509111/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-31_09:55:10_99fc3ac8ac2d79f19e983b63c2992b78f4509111/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_09:55:10_99fc3ac8ac2d79f19e983b63c2992b78f4509111/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-31_09:55:10_99fc3ac8ac2d79f19e983b63c2992b78f4509111/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-31_09:55:10_99fc3ac8ac2d79f19e983b63c2992b78f4509111/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_09:55:10_99fc3ac8ac2d79f19e983b63c2992b78f4509111/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-31_09:55:10_99fc3ac8ac2d79f19e983b63c2992b78f4509111/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-31_09:55:10_99fc3ac8ac2d79f19e983b63c2992b78f4509111/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index ea0b192bf987479ddc6fc89b441bdda9dfdb2f88..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_09:55:10_99fc3ac8ac2d79f19e983b63c2992b78f4509111/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.78425599999997,0.00314,318.0 diff --git a/raw_results/2023-08-31_09:55:10_99fc3ac8ac2d79f19e983b63c2992b78f4509111/pytorch_bert_inference/0/main.log b/raw_results/2023-08-31_09:55:10_99fc3ac8ac2d79f19e983b63c2992b78f4509111/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
2040dfd4127fb6911a547996b8df49d61cacea41..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_09:55:10_99fc3ac8ac2d79f19e983b63c2992b78f4509111/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-31 10:53:24,889][benchmark][INFO] - Configuring inference benchmark -[2023-08-31 10:53:24,890][benchmark][INFO] - + Setting seed(42) -[2023-08-31 10:53:26,374][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-31 10:53:26,374][backend][INFO] - Configuring pytorch backend -[2023-08-31 10:53:26,374][backend][INFO] - + Checking initial device isolation -[2023-08-31 10:53:26,375][backend][INFO] - + Checking contineous device isolation -[2023-08-31 10:53:26,375][pytorch][INFO] - + Disabling gradients -[2023-08-31 10:53:26,375][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-31 10:53:27,208][pytorch][INFO] - + Turning on eval mode -[2023-08-31 10:53:27,209][inference][INFO] - Running inference benchmark -[2023-08-31 10:53:27,340][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-31 10:53:27,341][inference][INFO] - + Tracking forward pass peak memory -[2023-08-31 10:53:27,396][inference][INFO] - + Forward pass peak memory: 466.78425599999997 (MB) -[2023-08-31 10:53:27,397][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-31 10:53:27,399][inference][INFO] - + Warming up the forward pass -[2023-08-31 10:53:27,435][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-31 10:53:32,486][inference][INFO] - + Forward pass latency: 3.14e-03 (s) -[2023-08-31 10:53:32,487][inference][INFO] - + Forward pass throughput: 318.00 (samples/s) -[2023-08-31 10:53:32,488][inference][INFO] - Saving inference results -[2023-08-31 10:53:32,499][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-31_09:55:10_99fc3ac8ac2d79f19e983b63c2992b78f4509111/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-31_09:55:10_99fc3ac8ac2d79f19e983b63c2992b78f4509111/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_09:55:10_99fc3ac8ac2d79f19e983b63c2992b78f4509111/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-31_09:55:10_99fc3ac8ac2d79f19e983b63c2992b78f4509111/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-31_09:55:10_99fc3ac8ac2d79f19e983b63c2992b78f4509111/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index ad26523c48be91984c9a818e53bb1be2e691043b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_09:55:10_99fc3ac8ac2d79f19e983b63c2992b78f4509111/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
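Note: across the deleted `main.log` and `inference_results.csv` hunks, the reported throughputs are consistent, up to rounding to three significant figures, with throughput = batch_size / forward latency for the forward pass and batch_size × new_tokens / generate latency for the generation pass (new_tokens is 100 in every config here). This is an inference from the numbers in this diff, not a claim about the library's internals; a quick cross-check:

```python
# Figures taken from the deleted inference_results.csv hunks above;
# "units" is batch_size for forward passes and batch_size * new_tokens
# for generation passes.
checks = [
    # (label, units, latency_s, reported_throughput)
    ("bert forward, bs=1",  1,       0.00314, 318.0),  # samples/s
    ("gpt2 forward, bs=1",  1,       0.00384, 260.0),  # samples/s
    ("gpt2 generate, bs=1", 1 * 100, 0.497,   201.0),  # tokens/s
]
for label, units, latency, reported in checks:
    print(f"{label}: derived {units / latency:.0f} vs reported {reported:.0f}")
# bert forward, bs=1: derived 318 vs reported 318
# gpt2 forward, bs=1: derived 260 vs reported 260
# gpt2 generate, bs=1: derived 201 vs reported 201
```

The same relation explains why the batch-size-4 bert runs report roughly 4× the samples/s of the batch-size-1 runs at similar per-pass latency.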
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-31_09:55:10_99fc3ac8ac2d79f19e983b63c2992b78f4509111/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-31_09:55:10_99fc3ac8ac2d79f19e983b63c2992b78f4509111/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-31_09:55:10_99fc3ac8ac2d79f19e983b63c2992b78f4509111/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_09:55:10_99fc3ac8ac2d79f19e983b63c2992b78f4509111/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-31_09:55:10_99fc3ac8ac2d79f19e983b63c2992b78f4509111/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-31_09:55:10_99fc3ac8ac2d79f19e983b63c2992b78f4509111/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_09:55:10_99fc3ac8ac2d79f19e983b63c2992b78f4509111/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-31_09:55:10_99fc3ac8ac2d79f19e983b63c2992b78f4509111/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-31_09:55:10_99fc3ac8ac2d79f19e983b63c2992b78f4509111/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index b6a8e4bf9c23ac975a80f78f769ce196d2b6bc17..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_09:55:10_99fc3ac8ac2d79f19e983b63c2992b78f4509111/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.836928,0.00344,1160.0 diff --git a/raw_results/2023-08-31_09:55:10_99fc3ac8ac2d79f19e983b63c2992b78f4509111/pytorch_bert_inference/1/main.log b/raw_results/2023-08-31_09:55:10_99fc3ac8ac2d79f19e983b63c2992b78f4509111/pytorch_bert_inference/1/main.log deleted file mode 100644 index b4abc25796ecb9e8ad4fecf060eedb8ca07e6d80..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_09:55:10_99fc3ac8ac2d79f19e983b63c2992b78f4509111/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-31 10:53:32,874][benchmark][INFO] - Configuring inference benchmark -[2023-08-31 10:53:32,874][benchmark][INFO] - + Setting seed(42) -[2023-08-31 10:53:33,303][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-31 10:53:33,304][backend][INFO] - Configuring pytorch backend -[2023-08-31 10:53:33,304][backend][INFO] - + Checking initial device isolation -[2023-08-31 10:53:33,304][backend][INFO] - + Checking contineous device isolation -[2023-08-31 10:53:33,304][pytorch][INFO] - + Disabling gradients -[2023-08-31 10:53:33,304][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-31 10:53:33,416][pytorch][INFO] - + Turning on eval mode -[2023-08-31 10:53:33,417][inference][INFO] - Running inference benchmark -[2023-08-31 10:53:33,537][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-31 10:53:33,539][inference][INFO] - + Tracking forward pass 
peak memory -[2023-08-31 10:53:33,577][inference][INFO] - + Forward pass peak memory: 467.836928 (MB) -[2023-08-31 10:53:33,577][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-31 10:53:33,579][inference][INFO] - + Warming up the forward pass -[2023-08-31 10:53:33,614][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-31 10:53:38,660][inference][INFO] - + Forward pass latency: 3.44e-03 (s) -[2023-08-31 10:53:38,661][inference][INFO] - + Forward pass throughput: 1160.00 (samples/s) -[2023-08-31 10:53:38,661][inference][INFO] - Saving inference results -[2023-08-31 10:53:38,669][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-31_09:55:10_99fc3ac8ac2d79f19e983b63c2992b78f4509111/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-31_09:55:10_99fc3ac8ac2d79f19e983b63c2992b78f4509111/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_09:55:10_99fc3ac8ac2d79f19e983b63c2992b78f4509111/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-31_09:55:10_99fc3ac8ac2d79f19e983b63c2992b78f4509111/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-31_09:55:10_99fc3ac8ac2d79f19e983b63c2992b78f4509111/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index cd97caa39154a572ebed2e1cec06856451d3d963..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_09:55:10_99fc3ac8ac2d79f19e983b63c2992b78f4509111/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-31_09:55:10_99fc3ac8ac2d79f19e983b63c2992b78f4509111/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-31_09:55:10_99fc3ac8ac2d79f19e983b63c2992b78f4509111/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-31_09:55:10_99fc3ac8ac2d79f19e983b63c2992b78f4509111/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_09:55:10_99fc3ac8ac2d79f19e983b63c2992b78f4509111/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-31_09:55:10_99fc3ac8ac2d79f19e983b63c2992b78f4509111/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-31_09:55:10_99fc3ac8ac2d79f19e983b63c2992b78f4509111/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_09:55:10_99fc3ac8ac2d79f19e983b63c2992b78f4509111/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-31_09:55:10_99fc3ac8ac2d79f19e983b63c2992b78f4509111/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-31_09:55:10_99fc3ac8ac2d79f19e983b63c2992b78f4509111/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index a5db779502c495d53b207343026515dbd2a8d565..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_09:55:10_99fc3ac8ac2d79f19e983b63c2992b78f4509111/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.48352,0.00316,316.0,0.484,207.0 diff --git a/raw_results/2023-08-31_09:55:10_99fc3ac8ac2d79f19e983b63c2992b78f4509111/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-31_09:55:10_99fc3ac8ac2d79f19e983b63c2992b78f4509111/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 245081c58e5d14aac9b01df7b5ff8d22c8853287..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-31_09:55:10_99fc3ac8ac2d79f19e983b63c2992b78f4509111/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-31 10:53:43,540][benchmark][INFO] - Configuring inference benchmark -[2023-08-31 10:53:43,541][benchmark][INFO] - + Setting seed(42) -[2023-08-31 10:53:45,272][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-31 10:53:45,272][backend][INFO] - Configuring pytorch backend -[2023-08-31 10:53:45,272][backend][INFO] - + Checking initial device isolation -[2023-08-31 10:53:45,273][backend][INFO] - + Checking contineous device isolation -[2023-08-31 10:53:45,273][pytorch][INFO] - + Disabling gradients -[2023-08-31 10:53:45,273][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-31 10:53:45,899][pytorch][INFO] - + Turning on eval mode -[2023-08-31 10:53:45,900][inference][INFO] - Running inference benchmark -[2023-08-31 10:53:46,092][inference][INFO] - + Tracking forward pass peak memory -[2023-08-31 10:53:46,139][inference][INFO] - + Forward pass peak memory: 469.48352 (MB) -[2023-08-31 10:53:46,141][inference][INFO] - + Warming up the forward pass -[2023-08-31 10:53:46,177][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-31 10:53:51,228][inference][INFO] - + Forward pass latency: 3.16e-03 (s) -[2023-08-31 10:53:51,229][inference][INFO] - + Forward pass throughput: 316.00 (samples/s) -[2023-08-31 10:53:51,229][inference][INFO] - + Warming up the generation pass -[2023-08-31 10:53:51,724][inference][INFO] - + Tracking generation latency and throughput -[2023-08-31 10:53:57,046][inference][INFO] - + Generation pass latency: 4.84e-01 (s) -[2023-08-31 10:53:57,047][inference][INFO] - + Generation pass throughput: 207.00 (tokens/s) -[2023-08-31 10:53:57,047][inference][INFO] - Saving inference results -[2023-08-31 10:53:57,058][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-31_12:17:26_3b39b906183ed08d9961908eb73104aeea345d11/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-31_12:17:26_3b39b906183ed08d9961908eb73104aeea345d11/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_12:17:26_3b39b906183ed08d9961908eb73104aeea345d11/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 
16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-31_12:17:26_3b39b906183ed08d9961908eb73104aeea345d11/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-31_12:17:26_3b39b906183ed08d9961908eb73104aeea345d11/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 9a51e454a3b369f41a0b3f92c6be3ee58166df69..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_12:17:26_3b39b906183ed08d9961908eb73104aeea345d11/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-31_12:17:26_3b39b906183ed08d9961908eb73104aeea345d11/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-31_12:17:26_3b39b906183ed08d9961908eb73104aeea345d11/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-31_12:17:26_3b39b906183ed08d9961908eb73104aeea345d11/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_12:17:26_3b39b906183ed08d9961908eb73104aeea345d11/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-31_12:17:26_3b39b906183ed08d9961908eb73104aeea345d11/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-31_12:17:26_3b39b906183ed08d9961908eb73104aeea345d11/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_12:17:26_3b39b906183ed08d9961908eb73104aeea345d11/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-31_12:17:26_3b39b906183ed08d9961908eb73104aeea345d11/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-31_12:17:26_3b39b906183ed08d9961908eb73104aeea345d11/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index fda65c3d7274df434cd3f7310973bf7780966203..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_12:17:26_3b39b906183ed08d9961908eb73104aeea345d11/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.54668799999996,0.0035,286.0 diff --git a/raw_results/2023-08-31_12:17:26_3b39b906183ed08d9961908eb73104aeea345d11/pytorch_bert_inference/0/main.log b/raw_results/2023-08-31_12:17:26_3b39b906183ed08d9961908eb73104aeea345d11/pytorch_bert_inference/0/main.log deleted file mode 100644 index ac6a3d61bf802e93704b3c73eec11e42bedb9291..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_12:17:26_3b39b906183ed08d9961908eb73104aeea345d11/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-31 12:58:54,880][benchmark][INFO] - Configuring inference benchmark -[2023-08-31 12:58:54,881][benchmark][INFO] - + Setting seed(42) -[2023-08-31 12:58:56,091][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-31 12:58:56,091][backend][INFO] - Configuring pytorch backend -[2023-08-31 12:58:56,091][backend][INFO] - + Checking initial device isolation -[2023-08-31 12:58:56,091][backend][INFO] - + Checking contineous device isolation -[2023-08-31 12:58:56,091][pytorch][INFO] - + Disabling gradients -[2023-08-31 12:58:56,092][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-31 12:58:56,721][pytorch][INFO] - + Turning on eval mode -[2023-08-31 12:58:56,722][inference][INFO] - Running inference benchmark -[2023-08-31 12:58:56,845][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-31 12:58:56,846][inference][INFO] - + Tracking forward 
pass peak memory -[2023-08-31 12:58:57,058][inference][INFO] - + Forward pass peak memory: 466.54668799999996 (MB) -[2023-08-31 12:58:57,059][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-31 12:58:57,061][inference][INFO] - + Warming up the forward pass -[2023-08-31 12:58:57,103][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-31 12:59:02,153][inference][INFO] - + Forward pass latency: 3.50e-03 (s) -[2023-08-31 12:59:02,155][inference][INFO] - + Forward pass throughput: 286.00 (samples/s) -[2023-08-31 12:59:02,155][inference][INFO] - Saving inference results -[2023-08-31 12:59:02,165][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-31_12:17:26_3b39b906183ed08d9961908eb73104aeea345d11/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-31_12:17:26_3b39b906183ed08d9961908eb73104aeea345d11/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_12:17:26_3b39b906183ed08d9961908eb73104aeea345d11/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-31_12:17:26_3b39b906183ed08d9961908eb73104aeea345d11/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-31_12:17:26_3b39b906183ed08d9961908eb73104aeea345d11/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 1a2a88913b313441b4f5dbd347bbe03d3a23b617..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_12:17:26_3b39b906183ed08d9961908eb73104aeea345d11/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} 
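Each run's inference_results.csv is a one-row table keyed by an unnamed index column, with the metric name and unit embedded in each header. A short, self-contained sketch of reading the file deleted above using only the standard library (the CSV text is inlined verbatim for illustration):

```python
import csv
import io

# Verbatim contents of pytorch_bert_inference/0/inference_results.csv above.
raw = """,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s)
0,466.54668799999996,0.0035,286.0
"""

reader = csv.DictReader(io.StringIO(raw))
row = next(reader)  # the single measurement row, index "0"
print(float(row["forward.peak_memory(MB)"]))        # peak memory in MB
print(float(row["forward.latency(s)"]))             # 0.0035
print(float(row["forward.throughput(samples/s)"]))  # 286.0
```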
- launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-31_12:17:26_3b39b906183ed08d9961908eb73104aeea345d11/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-31_12:17:26_3b39b906183ed08d9961908eb73104aeea345d11/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-31_12:17:26_3b39b906183ed08d9961908eb73104aeea345d11/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_12:17:26_3b39b906183ed08d9961908eb73104aeea345d11/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-31_12:17:26_3b39b906183ed08d9961908eb73104aeea345d11/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-31_12:17:26_3b39b906183ed08d9961908eb73104aeea345d11/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_12:17:26_3b39b906183ed08d9961908eb73104aeea345d11/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-31_12:17:26_3b39b906183ed08d9961908eb73104aeea345d11/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-31_12:17:26_3b39b906183ed08d9961908eb73104aeea345d11/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index f81ed38537a09de0f1006e8ee8570c3fbc69fd68..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_12:17:26_3b39b906183ed08d9961908eb73104aeea345d11/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.443712,0.00429,932.0 diff --git a/raw_results/2023-08-31_12:17:26_3b39b906183ed08d9961908eb73104aeea345d11/pytorch_bert_inference/1/main.log b/raw_results/2023-08-31_12:17:26_3b39b906183ed08d9961908eb73104aeea345d11/pytorch_bert_inference/1/main.log deleted file mode 100644 index 
f2c0b2a3ed344c85b636c26417ec198ca7ef73ce..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_12:17:26_3b39b906183ed08d9961908eb73104aeea345d11/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-31 12:59:02,534][benchmark][INFO] - Configuring inference benchmark -[2023-08-31 12:59:02,534][benchmark][INFO] - + Setting seed(42) -[2023-08-31 12:59:02,964][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-31 12:59:02,965][backend][INFO] - Configuring pytorch backend -[2023-08-31 12:59:02,965][backend][INFO] - + Checking initial device isolation -[2023-08-31 12:59:02,965][backend][INFO] - + Checking contineous device isolation -[2023-08-31 12:59:02,965][pytorch][INFO] - + Disabling gradients -[2023-08-31 12:59:02,965][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-31 12:59:03,081][pytorch][INFO] - + Turning on eval mode -[2023-08-31 12:59:03,081][inference][INFO] - Running inference benchmark -[2023-08-31 12:59:03,200][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-31 12:59:03,201][inference][INFO] - + Tracking forward pass peak memory -[2023-08-31 12:59:03,240][inference][INFO] - + Forward pass peak memory: 467.443712 (MB) -[2023-08-31 12:59:03,240][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-31 12:59:03,242][inference][INFO] - + Warming up the forward pass -[2023-08-31 12:59:03,289][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-31 12:59:08,327][inference][INFO] - + Forward pass latency: 4.29e-03 (s) -[2023-08-31 12:59:08,328][inference][INFO] - + Forward pass throughput: 932.00 (samples/s) -[2023-08-31 12:59:08,328][inference][INFO] - Saving inference results -[2023-08-31 12:59:08,336][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-31_12:17:26_3b39b906183ed08d9961908eb73104aeea345d11/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-31_12:17:26_3b39b906183ed08d9961908eb73104aeea345d11/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_12:17:26_3b39b906183ed08d9961908eb73104aeea345d11/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
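Jobs 0 and 1 of this sweep differ only in benchmark.input_shapes.batch_size (1 vs 4), and the recorded numbers show the expected sub-linear scaling: latency grows from 3.50e-03 s to 4.29e-03 s while throughput grows from 286 to 932 samples/s. A small sanity-check sketch over the two CSV rows above:

```python
# (latency_s, samples_per_s) per batch size, copied from the two CSVs above.
runs = {1: (0.0035, 286.0), 4: (0.00429, 932.0)}

for batch_size, (latency, throughput) in runs.items():
    # throughput is consistent with batch_size / latency, rounded
    assert round(batch_size / latency) == throughput

print(f"4x batch -> {runs[4][1] / runs[1][1]:.2f}x throughput")  # ~3.26x, sub-linear
```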
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-31_12:17:26_3b39b906183ed08d9961908eb73104aeea345d11/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-31_12:17:26_3b39b906183ed08d9961908eb73104aeea345d11/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index c2a3a71ab67e5b2653a1d4426759b6a8137fce65..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_12:17:26_3b39b906183ed08d9961908eb73104aeea345d11/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-31_12:17:26_3b39b906183ed08d9961908eb73104aeea345d11/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-31_12:17:26_3b39b906183ed08d9961908eb73104aeea345d11/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-31_12:17:26_3b39b906183ed08d9961908eb73104aeea345d11/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_12:17:26_3b39b906183ed08d9961908eb73104aeea345d11/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-31_12:17:26_3b39b906183ed08d9961908eb73104aeea345d11/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-31_12:17:26_3b39b906183ed08d9961908eb73104aeea345d11/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_12:17:26_3b39b906183ed08d9961908eb73104aeea345d11/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-31_12:17:26_3b39b906183ed08d9961908eb73104aeea345d11/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-31_12:17:26_3b39b906183ed08d9961908eb73104aeea345d11/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index f93b713171c6e612de95312e430cdfbc77804822..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_12:17:26_3b39b906183ed08d9961908eb73104aeea345d11/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.458944,0.00325,308.0,0.492,203.0 diff --git a/raw_results/2023-08-31_12:17:26_3b39b906183ed08d9961908eb73104aeea345d11/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-31_12:17:26_3b39b906183ed08d9961908eb73104aeea345d11/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index ea9932aa6a7099cac3c926a265718a7dac28b1cd..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_12:17:26_3b39b906183ed08d9961908eb73104aeea345d11/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-31 12:59:13,287][benchmark][INFO] - Configuring inference benchmark -[2023-08-31 12:59:13,288][benchmark][INFO] - + Setting seed(42) -[2023-08-31 12:59:14,756][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-31 12:59:14,756][backend][INFO] - Configuring pytorch backend -[2023-08-31 12:59:14,756][backend][INFO] - + Checking initial device isolation -[2023-08-31 12:59:14,757][backend][INFO] - + Checking contineous device isolation -[2023-08-31 12:59:14,757][pytorch][INFO] - + Disabling gradients -[2023-08-31 12:59:14,757][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-31 12:59:15,404][pytorch][INFO] - + Turning on eval mode -[2023-08-31 12:59:15,405][inference][INFO] - Running inference benchmark -[2023-08-31 12:59:15,724][inference][INFO] - + Tracking forward pass peak memory -[2023-08-31 12:59:15,771][inference][INFO] - + Forward pass peak memory: 469.458944 (MB) -[2023-08-31 12:59:15,772][inference][INFO] - + Warming up the forward pass 
-[2023-08-31 12:59:15,806][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-31 12:59:20,855][inference][INFO] - + Forward pass latency: 3.25e-03 (s) -[2023-08-31 12:59:20,856][inference][INFO] - + Forward pass throughput: 308.00 (samples/s) -[2023-08-31 12:59:20,857][inference][INFO] - + Warming up the generation pass -[2023-08-31 12:59:21,358][inference][INFO] - + Tracking generation latency and throughput -[2023-08-31 12:59:26,774][inference][INFO] - + Generation pass latency: 4.92e-01 (s) -[2023-08-31 12:59:26,775][inference][INFO] - + Generation pass throughput: 203.00 (tokens/s) -[2023-08-31 12:59:26,775][inference][INFO] - Saving inference results -[2023-08-31 12:59:26,788][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-31_14:06:56_a39ebbf87978fe3b129efbf9d4a6dfeefcacf08c/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-31_14:06:56_a39ebbf87978fe3b129efbf9d4a6dfeefcacf08c/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_14:06:56_a39ebbf87978fe3b129efbf9d4a6dfeefcacf08c/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-31_14:06:56_a39ebbf87978fe3b129efbf9d4a6dfeefcacf08c/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-31_14:06:56_a39ebbf87978fe3b129efbf9d4a6dfeefcacf08c/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 964c9a08f0c3c5540345cf922969747167b7b6fb..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_14:06:56_a39ebbf87978fe3b129efbf9d4a6dfeefcacf08c/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
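Note the difference between the two YAML snapshots kept per job: .config/config.yaml stores the raw OmegaConf interpolation (disable_grad: ${is_inference:${benchmark.name}}) while hydra_config.yaml stores its resolved value (disable_grad: true). A minimal sketch of how such a resolver behaves; the resolver body here is an assumption for illustration, not optimum-benchmark's actual implementation:

```python
from omegaconf import OmegaConf

# Assumed resolver: true exactly when the benchmark is the inference one.
OmegaConf.register_new_resolver("is_inference", lambda name: name == "inference")

cfg = OmegaConf.create(
    {
        "benchmark": {"name": "inference"},
        "backend": {"disable_grad": "${is_inference:${benchmark.name}}"},
    }
)

# Resolves through the nested interpolation, as hydra_config.yaml records.
print(cfg.backend.disable_grad)  # True
```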
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-31_14:06:56_a39ebbf87978fe3b129efbf9d4a6dfeefcacf08c/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-31_14:06:56_a39ebbf87978fe3b129efbf9d4a6dfeefcacf08c/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-31_14:06:56_a39ebbf87978fe3b129efbf9d4a6dfeefcacf08c/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_14:06:56_a39ebbf87978fe3b129efbf9d4a6dfeefcacf08c/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-31_14:06:56_a39ebbf87978fe3b129efbf9d4a6dfeefcacf08c/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-31_14:06:56_a39ebbf87978fe3b129efbf9d4a6dfeefcacf08c/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_14:06:56_a39ebbf87978fe3b129efbf9d4a6dfeefcacf08c/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-31_14:06:56_a39ebbf87978fe3b129efbf9d4a6dfeefcacf08c/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-31_14:06:56_a39ebbf87978fe3b129efbf9d4a6dfeefcacf08c/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index f7adea0005a55717d79a316e30bf19ae22621810..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_14:06:56_a39ebbf87978fe3b129efbf9d4a6dfeefcacf08c/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.65318399999995,0.0042,238.0 diff --git a/raw_results/2023-08-31_14:06:56_a39ebbf87978fe3b129efbf9d4a6dfeefcacf08c/pytorch_bert_inference/0/main.log b/raw_results/2023-08-31_14:06:56_a39ebbf87978fe3b129efbf9d4a6dfeefcacf08c/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
4722563797967538f6b6bdd575a1f79520c761d6..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_14:06:56_a39ebbf87978fe3b129efbf9d4a6dfeefcacf08c/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-31 14:49:57,716][benchmark][INFO] - Configuring inference benchmark -[2023-08-31 14:49:57,716][benchmark][INFO] - + Setting seed(42) -[2023-08-31 14:49:58,995][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-31 14:49:58,995][backend][INFO] - Configuring pytorch backend -[2023-08-31 14:49:58,995][backend][INFO] - + Checking initial device isolation -[2023-08-31 14:49:58,995][backend][INFO] - + Checking contineous device isolation -[2023-08-31 14:49:58,995][pytorch][INFO] - + Disabling gradients -[2023-08-31 14:49:58,996][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-31 14:49:59,617][pytorch][INFO] - + Turning on eval mode -[2023-08-31 14:49:59,618][inference][INFO] - Running inference benchmark -[2023-08-31 14:49:59,739][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-31 14:49:59,741][inference][INFO] - + Tracking forward pass peak memory -[2023-08-31 14:49:59,804][inference][INFO] - + Forward pass peak memory: 466.65318399999995 (MB) -[2023-08-31 14:49:59,805][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-31 14:49:59,807][inference][INFO] - + Warming up the forward pass -[2023-08-31 14:49:59,843][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-31 14:50:04,892][inference][INFO] - + Forward pass latency: 4.20e-03 (s) -[2023-08-31 14:50:04,893][inference][INFO] - + Forward pass throughput: 238.00 (samples/s) -[2023-08-31 14:50:04,893][inference][INFO] - Saving inference results -[2023-08-31 14:50:04,906][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-31_14:06:56_a39ebbf87978fe3b129efbf9d4a6dfeefcacf08c/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-31_14:06:56_a39ebbf87978fe3b129efbf9d4a6dfeefcacf08c/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_14:06:56_a39ebbf87978fe3b129efbf9d4a6dfeefcacf08c/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
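All of these deleted artifacts follow one layout: raw_results/&lt;commit-date&gt;_&lt;commit-sha&gt;/&lt;experiment&gt;/&lt;job&gt;/inference_results.csv. A sketch (assuming pandas; this is not the repository's own tooling) for sweeping them into a single table for cross-commit comparison:

```python
import glob
import os

import pandas as pd

frames = []
for path in glob.glob("raw_results/*/*/*/inference_results.csv"):
    # e.g. raw_results/2023-08-31_14:06:56_<sha>/pytorch_bert_inference/0/...
    _, commit, experiment, job, _ = path.split(os.sep)
    df = pd.read_csv(path, index_col=0)
    df["commit"], df["experiment"], df["job"] = commit, experiment, job
    frames.append(df)

results = pd.concat(frames, ignore_index=True)
print(results[["commit", "experiment", "job", "forward.latency(s)"]])
```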
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-31_14:06:56_a39ebbf87978fe3b129efbf9d4a6dfeefcacf08c/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-31_14:06:56_a39ebbf87978fe3b129efbf9d4a6dfeefcacf08c/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index d289708e65190347d2296e7f814092de21de49eb..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_14:06:56_a39ebbf87978fe3b129efbf9d4a6dfeefcacf08c/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-31_14:06:56_a39ebbf87978fe3b129efbf9d4a6dfeefcacf08c/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-31_14:06:56_a39ebbf87978fe3b129efbf9d4a6dfeefcacf08c/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-31_14:06:56_a39ebbf87978fe3b129efbf9d4a6dfeefcacf08c/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_14:06:56_a39ebbf87978fe3b129efbf9d4a6dfeefcacf08c/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-31_14:06:56_a39ebbf87978fe3b129efbf9d4a6dfeefcacf08c/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-31_14:06:56_a39ebbf87978fe3b129efbf9d4a6dfeefcacf08c/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_14:06:56_a39ebbf87978fe3b129efbf9d4a6dfeefcacf08c/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-31_14:06:56_a39ebbf87978fe3b129efbf9d4a6dfeefcacf08c/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-31_14:06:56_a39ebbf87978fe3b129efbf9d4a6dfeefcacf08c/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index f7a7376417877efdf8ada320ae69a92fb53ffab0..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_14:06:56_a39ebbf87978fe3b129efbf9d4a6dfeefcacf08c/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.550208,0.00397,1010.0 diff --git a/raw_results/2023-08-31_14:06:56_a39ebbf87978fe3b129efbf9d4a6dfeefcacf08c/pytorch_bert_inference/1/main.log b/raw_results/2023-08-31_14:06:56_a39ebbf87978fe3b129efbf9d4a6dfeefcacf08c/pytorch_bert_inference/1/main.log deleted file mode 100644 index a65708202537801a92919fe2c9807562faf27b4c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_14:06:56_a39ebbf87978fe3b129efbf9d4a6dfeefcacf08c/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-31 14:50:05,373][benchmark][INFO] - Configuring inference benchmark -[2023-08-31 14:50:05,374][benchmark][INFO] - + Setting seed(42) -[2023-08-31 14:50:05,803][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-31 14:50:05,803][backend][INFO] - Configuring pytorch backend -[2023-08-31 14:50:05,803][backend][INFO] - + Checking initial device isolation -[2023-08-31 14:50:05,804][backend][INFO] - + Checking contineous device isolation -[2023-08-31 14:50:05,804][pytorch][INFO] - + Disabling gradients -[2023-08-31 14:50:05,804][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-31 14:50:05,921][pytorch][INFO] - + Turning on eval mode -[2023-08-31 14:50:05,922][inference][INFO] - Running inference benchmark -[2023-08-31 14:50:06,054][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-31 14:50:06,055][inference][INFO] - + Tracking forward pass 
peak memory -[2023-08-31 14:50:06,095][inference][INFO] - + Forward pass peak memory: 467.550208 (MB) -[2023-08-31 14:50:06,096][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-31 14:50:06,098][inference][INFO] - + Warming up the forward pass -[2023-08-31 14:50:06,147][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-31 14:50:11,189][inference][INFO] - + Forward pass latency: 3.97e-03 (s) -[2023-08-31 14:50:11,190][inference][INFO] - + Forward pass throughput: 1010.00 (samples/s) -[2023-08-31 14:50:11,190][inference][INFO] - Saving inference results -[2023-08-31 14:50:11,198][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-31_14:06:56_a39ebbf87978fe3b129efbf9d4a6dfeefcacf08c/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-31_14:06:56_a39ebbf87978fe3b129efbf9d4a6dfeefcacf08c/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_14:06:56_a39ebbf87978fe3b129efbf9d4a6dfeefcacf08c/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-31_14:06:56_a39ebbf87978fe3b129efbf9d4a6dfeefcacf08c/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-31_14:06:56_a39ebbf87978fe3b129efbf9d4a6dfeefcacf08c/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index ab6ba4fc7789bddbef1b597717c7b81beec6f158..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_14:06:56_a39ebbf87978fe3b129efbf9d4a6dfeefcacf08c/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
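The hydra.yaml snapshots record everything needed to re-issue a sweep: mode MULTIRUN, config_name bert_cpu_inference, sweeper params benchmark.input_shapes.batch_size: 1,4, and job name main. A hedged reconstruction of the launch (main.py is inferred from the recorded job name, and the working directory from runtime.cwd; neither is confirmed by the diff):

```python
import subprocess

# Re-issues the recorded multirun sweep; --multirun and --config-name are
# standard Hydra CLI flags, main.py is assumed from "job: name: main".
subprocess.run(
    [
        "python", "main.py", "--multirun",
        "--config-name", "bert_cpu_inference",
        "benchmark.input_shapes.batch_size=1,4",
    ],
    cwd="/home/user/transformers-regression",  # runtime.cwd in hydra.yaml
    check=True,
)
```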
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-31_14:06:56_a39ebbf87978fe3b129efbf9d4a6dfeefcacf08c/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-31_14:06:56_a39ebbf87978fe3b129efbf9d4a6dfeefcacf08c/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-31_14:06:56_a39ebbf87978fe3b129efbf9d4a6dfeefcacf08c/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_14:06:56_a39ebbf87978fe3b129efbf9d4a6dfeefcacf08c/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-31_14:06:56_a39ebbf87978fe3b129efbf9d4a6dfeefcacf08c/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-31_14:06:56_a39ebbf87978fe3b129efbf9d4a6dfeefcacf08c/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_14:06:56_a39ebbf87978fe3b129efbf9d4a6dfeefcacf08c/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-31_14:06:56_a39ebbf87978fe3b129efbf9d4a6dfeefcacf08c/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-31_14:06:56_a39ebbf87978fe3b129efbf9d4a6dfeefcacf08c/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index d170d5925503f967615b7721888902c76ce08859..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_14:06:56_a39ebbf87978fe3b129efbf9d4a6dfeefcacf08c/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.458944,0.00312,321.0,0.485,206.0 diff --git a/raw_results/2023-08-31_14:06:56_a39ebbf87978fe3b129efbf9d4a6dfeefcacf08c/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-31_14:06:56_a39ebbf87978fe3b129efbf9d4a6dfeefcacf08c/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 96c3befa754058d86c71f43e1bd9dfe433c98e7a..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-31_14:06:56_a39ebbf87978fe3b129efbf9d4a6dfeefcacf08c/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-31 14:50:16,111][benchmark][INFO] - Configuring inference benchmark -[2023-08-31 14:50:16,113][benchmark][INFO] - + Setting seed(42) -[2023-08-31 14:50:17,544][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-31 14:50:17,545][backend][INFO] - Configuring pytorch backend -[2023-08-31 14:50:17,545][backend][INFO] - + Checking initial device isolation -[2023-08-31 14:50:17,545][backend][INFO] - + Checking contineous device isolation -[2023-08-31 14:50:17,545][pytorch][INFO] - + Disabling gradients -[2023-08-31 14:50:17,546][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-31 14:50:18,533][pytorch][INFO] - + Turning on eval mode -[2023-08-31 14:50:18,534][inference][INFO] - Running inference benchmark -[2023-08-31 14:50:18,826][inference][INFO] - + Tracking forward pass peak memory -[2023-08-31 14:50:18,875][inference][INFO] - + Forward pass peak memory: 469.458944 (MB) -[2023-08-31 14:50:18,877][inference][INFO] - + Warming up the forward pass -[2023-08-31 14:50:18,911][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-31 14:50:23,965][inference][INFO] - + Forward pass latency: 3.12e-03 (s) -[2023-08-31 14:50:23,966][inference][INFO] - + Forward pass throughput: 321.00 (samples/s) -[2023-08-31 14:50:23,967][inference][INFO] - + Warming up the generation pass -[2023-08-31 14:50:24,467][inference][INFO] - + Tracking generation latency and throughput -[2023-08-31 14:50:29,800][inference][INFO] - + Generation pass latency: 4.85e-01 (s) -[2023-08-31 14:50:29,801][inference][INFO] - + Generation pass throughput: 206.00 (tokens/s) -[2023-08-31 14:50:29,801][inference][INFO] - Saving inference results -[2023-08-31 14:50:29,811][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-31_14:08:20_2be8a9098e06262bdd5c16b5e8a70f145df88e96/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-31_14:08:20_2be8a9098e06262bdd5c16b5e8a70f145df88e96/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_14:08:20_2be8a9098e06262bdd5c16b5e8a70f145df88e96/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-31_14:08:20_2be8a9098e06262bdd5c16b5e8a70f145df88e96/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-31_14:08:20_2be8a9098e06262bdd5c16b5e8a70f145df88e96/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 1efe4bc6cf4ee61eedb04a898c7f770df756823a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_14:08:20_2be8a9098e06262bdd5c16b5e8a70f145df88e96/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
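The sweeper block in this hydra.yaml (params: benchmark.input_shapes.batch_size: 1,4) is what produces the numbered job directories 0/ (batch_size=1) and 1/ (batch_size=4) seen throughout raw_results/. As a rough illustration only, assuming a standard Hydra entry point (the exact launch command used by this repo is not shown in the diff), the multirun expansion performed by BasicSweeper behaves like this:

    # Illustrative sketch: how Hydra's BasicSweeper expands a comma-separated
    # override into the numbered jobs, with hydra.sweep.subdir=${hydra.job.num}.
    # Roughly equivalent to a launch such as (hypothetical invocation):
    #   python main.py -m --config-name bert_cpu_inference benchmark.input_shapes.batch_size=1,4
    override = "benchmark.input_shapes.batch_size=1,4"
    key, values = override.split("=")
    for job_num, value in enumerate(values.split(",")):
        # job 0 runs with batch_size=1, job 1 with batch_size=4
        print(f"sweeps/<commit>/pytorch_bert_inference/{job_num}: {key}={value}")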
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-31_14:08:20_2be8a9098e06262bdd5c16b5e8a70f145df88e96/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-31_14:08:20_2be8a9098e06262bdd5c16b5e8a70f145df88e96/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-31_14:08:20_2be8a9098e06262bdd5c16b5e8a70f145df88e96/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_14:08:20_2be8a9098e06262bdd5c16b5e8a70f145df88e96/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-31_14:08:20_2be8a9098e06262bdd5c16b5e8a70f145df88e96/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-31_14:08:20_2be8a9098e06262bdd5c16b5e8a70f145df88e96/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_14:08:20_2be8a9098e06262bdd5c16b5e8a70f145df88e96/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-31_14:08:20_2be8a9098e06262bdd5c16b5e8a70f145df88e96/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-31_14:08:20_2be8a9098e06262bdd5c16b5e8a70f145df88e96/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 447684b091636019b3cc4062623dd5006a3b9575..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_14:08:20_2be8a9098e06262bdd5c16b5e8a70f145df88e96/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.095552,0.00374,267.0 diff --git a/raw_results/2023-08-31_14:08:20_2be8a9098e06262bdd5c16b5e8a70f145df88e96/pytorch_bert_inference/0/main.log b/raw_results/2023-08-31_14:08:20_2be8a9098e06262bdd5c16b5e8a70f145df88e96/pytorch_bert_inference/0/main.log deleted file mode 100644 index 84d20fe5119a385b1b2d39c6be1585580dd90c34..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_14:08:20_2be8a9098e06262bdd5c16b5e8a70f145df88e96/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-31 14:51:40,269][benchmark][INFO] - Configuring inference benchmark -[2023-08-31 14:51:40,270][benchmark][INFO] - + Setting seed(42) -[2023-08-31 14:51:41,615][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-31 14:51:41,615][backend][INFO] - Configuring pytorch backend -[2023-08-31 14:51:41,615][backend][INFO] - + Checking initial device isolation -[2023-08-31 14:51:41,616][backend][INFO] - + Checking contineous device isolation -[2023-08-31 14:51:41,616][pytorch][INFO] - + Disabling gradients -[2023-08-31 14:51:41,616][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-31 14:51:42,233][pytorch][INFO] - + Turning on eval mode -[2023-08-31 14:51:42,233][inference][INFO] - Running inference benchmark -[2023-08-31 14:51:42,357][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-31 14:51:42,359][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-31 14:51:42,419][inference][INFO] - + Forward pass peak memory: 467.095552 (MB) -[2023-08-31 14:51:42,420][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-31 14:51:42,422][inference][INFO] - + Warming up the forward pass -[2023-08-31 14:51:42,454][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-31 14:51:47,501][inference][INFO] - + Forward pass latency: 3.74e-03 (s) -[2023-08-31 14:51:47,503][inference][INFO] - + Forward pass throughput: 267.00 (samples/s) -[2023-08-31 14:51:47,503][inference][INFO] - Saving inference results -[2023-08-31 14:51:47,513][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-31_14:08:20_2be8a9098e06262bdd5c16b5e8a70f145df88e96/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-31_14:08:20_2be8a9098e06262bdd5c16b5e8a70f145df88e96/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_14:08:20_2be8a9098e06262bdd5c16b5e8a70f145df88e96/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-31_14:08:20_2be8a9098e06262bdd5c16b5e8a70f145df88e96/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-31_14:08:20_2be8a9098e06262bdd5c16b5e8a70f145df88e96/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index d8b401d2e29156a32d030e7d9f11195e8deddabb..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_14:08:20_2be8a9098e06262bdd5c16b5e8a70f145df88e96/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-31_14:08:20_2be8a9098e06262bdd5c16b5e8a70f145df88e96/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-31_14:08:20_2be8a9098e06262bdd5c16b5e8a70f145df88e96/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-08-31_14:08:20_2be8a9098e06262bdd5c16b5e8a70f145df88e96/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_14:08:20_2be8a9098e06262bdd5c16b5e8a70f145df88e96/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-31_14:08:20_2be8a9098e06262bdd5c16b5e8a70f145df88e96/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-31_14:08:20_2be8a9098e06262bdd5c16b5e8a70f145df88e96/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_14:08:20_2be8a9098e06262bdd5c16b5e8a70f145df88e96/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-31_14:08:20_2be8a9098e06262bdd5c16b5e8a70f145df88e96/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-31_14:08:20_2be8a9098e06262bdd5c16b5e8a70f145df88e96/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index e626831784f0198458cf73580b5b44d68c0ee69e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_14:08:20_2be8a9098e06262bdd5c16b5e8a70f145df88e96/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.03353599999997,0.00419,955.0 diff --git a/raw_results/2023-08-31_14:08:20_2be8a9098e06262bdd5c16b5e8a70f145df88e96/pytorch_bert_inference/1/main.log b/raw_results/2023-08-31_14:08:20_2be8a9098e06262bdd5c16b5e8a70f145df88e96/pytorch_bert_inference/1/main.log deleted file mode 100644 index 9dfcf8c8030d3e62aa434ee78366f3223c3efa8d..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-31_14:08:20_2be8a9098e06262bdd5c16b5e8a70f145df88e96/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-31 14:51:47,891][benchmark][INFO] - Configuring inference benchmark -[2023-08-31 14:51:47,893][benchmark][INFO] - + Setting seed(42) -[2023-08-31 14:51:48,331][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-31 14:51:48,331][backend][INFO] - Configuring pytorch backend -[2023-08-31 14:51:48,331][backend][INFO] - + Checking initial device isolation -[2023-08-31 14:51:48,331][backend][INFO] - + Checking contineous device isolation -[2023-08-31 14:51:48,332][pytorch][INFO] - + Disabling gradients -[2023-08-31 14:51:48,332][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-31 14:51:48,453][pytorch][INFO] - + Turning on eval mode -[2023-08-31 14:51:48,453][inference][INFO] - Running inference benchmark -[2023-08-31 14:51:48,577][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-31 14:51:48,579][inference][INFO] - + Tracking forward pass peak memory -[2023-08-31 14:51:48,620][inference][INFO] - + Forward pass peak memory: 468.03353599999997 (MB) -[2023-08-31 14:51:48,621][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-31 14:51:48,623][inference][INFO] - + Warming up the forward pass -[2023-08-31 14:51:48,665][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-31 14:51:53,707][inference][INFO] - + Forward pass latency: 4.19e-03 (s) -[2023-08-31 14:51:53,708][inference][INFO] - + Forward pass throughput: 955.00 (samples/s) -[2023-08-31 14:51:53,709][inference][INFO] - Saving inference results -[2023-08-31 14:51:53,715][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-31_14:08:20_2be8a9098e06262bdd5c16b5e8a70f145df88e96/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-31_14:08:20_2be8a9098e06262bdd5c16b5e8a70f145df88e96/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_14:08:20_2be8a9098e06262bdd5c16b5e8a70f145df88e96/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference 
-model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-31_14:08:20_2be8a9098e06262bdd5c16b5e8a70f145df88e96/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-31_14:08:20_2be8a9098e06262bdd5c16b5e8a70f145df88e96/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 983d15edf61fe8a0c437a55b5c5e5e5829d769e1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_14:08:20_2be8a9098e06262bdd5c16b5e8a70f145df88e96/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
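In the config.yaml files above, disable_grad and eval_mode are written as the interpolation ${is_inference:${benchmark.name}}, while the corresponding hydra_config.yaml files show them already resolved to true. is_inference is a custom OmegaConf resolver; a minimal sketch of how such a resolver behaves (the actual registration inside optimum_benchmark may differ) is:

    from omegaconf import OmegaConf

    # Hypothetical re-creation of the ``is_inference`` resolver referenced by
    # the ${is_inference:${benchmark.name}} interpolations in config.yaml.
    OmegaConf.register_new_resolver("is_inference", lambda name: name == "inference")

    cfg = OmegaConf.create({
        "benchmark": {"name": "inference"},
        "backend": {"disable_grad": "${is_inference:${benchmark.name}}"},
    })
    print(cfg.backend.disable_grad)  # True, matching disable_grad: true in hydra_config.yaml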
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-31_14:08:20_2be8a9098e06262bdd5c16b5e8a70f145df88e96/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-31_14:08:20_2be8a9098e06262bdd5c16b5e8a70f145df88e96/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-31_14:08:20_2be8a9098e06262bdd5c16b5e8a70f145df88e96/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_14:08:20_2be8a9098e06262bdd5c16b5e8a70f145df88e96/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-31_14:08:20_2be8a9098e06262bdd5c16b5e8a70f145df88e96/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-31_14:08:20_2be8a9098e06262bdd5c16b5e8a70f145df88e96/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_14:08:20_2be8a9098e06262bdd5c16b5e8a70f145df88e96/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-31_14:08:20_2be8a9098e06262bdd5c16b5e8a70f145df88e96/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-31_14:08:20_2be8a9098e06262bdd5c16b5e8a70f145df88e96/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 929fd98e782fa69189b6b88d4864f9cf481a3628..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_14:08:20_2be8a9098e06262bdd5c16b5e8a70f145df88e96/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.184512,0.00307,326.0,0.536,187.0 diff --git a/raw_results/2023-08-31_14:08:20_2be8a9098e06262bdd5c16b5e8a70f145df88e96/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-31_14:08:20_2be8a9098e06262bdd5c16b5e8a70f145df88e96/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 310af78494b1891d33a251bfe09e8ae741691af8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_14:08:20_2be8a9098e06262bdd5c16b5e8a70f145df88e96/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-31 14:51:58,619][benchmark][INFO] - Configuring inference benchmark -[2023-08-31 14:51:58,620][benchmark][INFO] - + Setting seed(42) -[2023-08-31 14:52:00,031][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-31 14:52:00,032][backend][INFO] - Configuring pytorch backend -[2023-08-31 14:52:00,032][backend][INFO] - + Checking initial device isolation -[2023-08-31 14:52:00,032][backend][INFO] - + Checking contineous device isolation -[2023-08-31 14:52:00,032][pytorch][INFO] - + Disabling gradients -[2023-08-31 14:52:00,032][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-31 14:52:00,669][pytorch][INFO] - + Turning on eval mode -[2023-08-31 14:52:00,670][inference][INFO] - Running inference benchmark -[2023-08-31 14:52:00,867][inference][INFO] - + Tracking forward pass peak memory -[2023-08-31 14:52:00,921][inference][INFO] - + Forward pass peak memory: 469.184512 (MB) -[2023-08-31 14:52:00,922][inference][INFO] - + Warming up the forward pass 
-[2023-08-31 14:52:00,954][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-31 14:52:06,009][inference][INFO] - + Forward pass latency: 3.07e-03 (s) -[2023-08-31 14:52:06,010][inference][INFO] - + Forward pass throughput: 326.00 (samples/s) -[2023-08-31 14:52:06,011][inference][INFO] - + Warming up the generation pass -[2023-08-31 14:52:06,506][inference][INFO] - + Tracking generation latency and throughput -[2023-08-31 14:52:11,864][inference][INFO] - + Generation pass latency: 5.36e-01 (s) -[2023-08-31 14:52:11,865][inference][INFO] - + Generation pass throughput: 187.00 (tokens/s) -[2023-08-31 14:52:11,865][inference][INFO] - Saving inference results -[2023-08-31 14:52:11,875][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-31_15:01:27_9c5acca0028b550e1328ba7e2f16418fe0a0c634/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-31_15:01:27_9c5acca0028b550e1328ba7e2f16418fe0a0c634/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_15:01:27_9c5acca0028b550e1328ba7e2f16418fe0a0c634/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-31_15:01:27_9c5acca0028b550e1328ba7e2f16418fe0a0c634/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-31_15:01:27_9c5acca0028b550e1328ba7e2f16418fe0a0c634/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 8de912ba193716fe08c2b1f937a68347d7b8a35e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_15:01:27_9c5acca0028b550e1328ba7e2f16418fe0a0c634/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-31_15:01:27_9c5acca0028b550e1328ba7e2f16418fe0a0c634/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-31_15:01:27_9c5acca0028b550e1328ba7e2f16418fe0a0c634/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-31_15:01:27_9c5acca0028b550e1328ba7e2f16418fe0a0c634/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_15:01:27_9c5acca0028b550e1328ba7e2f16418fe0a0c634/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-31_15:01:27_9c5acca0028b550e1328ba7e2f16418fe0a0c634/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-31_15:01:27_9c5acca0028b550e1328ba7e2f16418fe0a0c634/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_15:01:27_9c5acca0028b550e1328ba7e2f16418fe0a0c634/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-31_15:01:27_9c5acca0028b550e1328ba7e2f16418fe0a0c634/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-31_15:01:27_9c5acca0028b550e1328ba7e2f16418fe0a0c634/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 044aa0f6b886bafabedd6fe05ef14ad0b8412cd6..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_15:01:27_9c5acca0028b550e1328ba7e2f16418fe0a0c634/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.78425599999997,0.00322,311.0 diff --git a/raw_results/2023-08-31_15:01:27_9c5acca0028b550e1328ba7e2f16418fe0a0c634/pytorch_bert_inference/0/main.log b/raw_results/2023-08-31_15:01:27_9c5acca0028b550e1328ba7e2f16418fe0a0c634/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
48a54825d49e397bc6a2142c7e8f686329a5e34d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_15:01:27_9c5acca0028b550e1328ba7e2f16418fe0a0c634/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-31 16:49:46,110][benchmark][INFO] - Configuring inference benchmark -[2023-08-31 16:49:46,111][benchmark][INFO] - + Setting seed(42) -[2023-08-31 16:49:47,326][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-31 16:49:47,327][backend][INFO] - Configuring pytorch backend -[2023-08-31 16:49:47,327][backend][INFO] - + Checking initial device isolation -[2023-08-31 16:49:47,327][backend][INFO] - + Checking contineous device isolation -[2023-08-31 16:49:47,327][pytorch][INFO] - + Disabling gradients -[2023-08-31 16:49:47,327][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-31 16:49:47,974][pytorch][INFO] - + Turning on eval mode -[2023-08-31 16:49:47,975][inference][INFO] - Running inference benchmark -[2023-08-31 16:49:48,098][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-31 16:49:48,100][inference][INFO] - + Tracking forward pass peak memory -[2023-08-31 16:49:48,166][inference][INFO] - + Forward pass peak memory: 466.78425599999997 (MB) -[2023-08-31 16:49:48,167][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-31 16:49:48,169][inference][INFO] - + Warming up the forward pass -[2023-08-31 16:49:48,204][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-31 16:49:53,254][inference][INFO] - + Forward pass latency: 3.22e-03 (s) -[2023-08-31 16:49:53,255][inference][INFO] - + Forward pass throughput: 311.00 (samples/s) -[2023-08-31 16:49:53,255][inference][INFO] - Saving inference results -[2023-08-31 16:49:53,266][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-31_15:01:27_9c5acca0028b550e1328ba7e2f16418fe0a0c634/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-31_15:01:27_9c5acca0028b550e1328ba7e2f16418fe0a0c634/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_15:01:27_9c5acca0028b550e1328ba7e2f16418fe0a0c634/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-31_15:01:27_9c5acca0028b550e1328ba7e2f16418fe0a0c634/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-31_15:01:27_9c5acca0028b550e1328ba7e2f16418fe0a0c634/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index bc9718a4295e3e22c173f73ffe3fb7ee7c24569a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_15:01:27_9c5acca0028b550e1328ba7e2f16418fe0a0c634/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-31_15:01:27_9c5acca0028b550e1328ba7e2f16418fe0a0c634/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-31_15:01:27_9c5acca0028b550e1328ba7e2f16418fe0a0c634/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-31_15:01:27_9c5acca0028b550e1328ba7e2f16418fe0a0c634/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_15:01:27_9c5acca0028b550e1328ba7e2f16418fe0a0c634/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-31_15:01:27_9c5acca0028b550e1328ba7e2f16418fe0a0c634/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-31_15:01:27_9c5acca0028b550e1328ba7e2f16418fe0a0c634/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_15:01:27_9c5acca0028b550e1328ba7e2f16418fe0a0c634/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-31_15:01:27_9c5acca0028b550e1328ba7e2f16418fe0a0c634/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-31_15:01:27_9c5acca0028b550e1328ba7e2f16418fe0a0c634/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 345ebbfe8f59b53cb8e058f90b538f1d516dac88..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_15:01:27_9c5acca0028b550e1328ba7e2f16418fe0a0c634/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.80006399999996,0.00356,1120.0 diff --git a/raw_results/2023-08-31_15:01:27_9c5acca0028b550e1328ba7e2f16418fe0a0c634/pytorch_bert_inference/1/main.log b/raw_results/2023-08-31_15:01:27_9c5acca0028b550e1328ba7e2f16418fe0a0c634/pytorch_bert_inference/1/main.log deleted file mode 100644 index 8245de4d4c0edd0a245fc4c52bdf275edc8df81d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_15:01:27_9c5acca0028b550e1328ba7e2f16418fe0a0c634/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-31 16:49:53,640][benchmark][INFO] - Configuring inference benchmark -[2023-08-31 16:49:53,641][benchmark][INFO] - + Setting seed(42) -[2023-08-31 16:49:54,076][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-31 16:49:54,076][backend][INFO] - Configuring pytorch backend -[2023-08-31 16:49:54,076][backend][INFO] - + Checking initial device isolation -[2023-08-31 16:49:54,076][backend][INFO] - + Checking contineous device isolation -[2023-08-31 16:49:54,077][pytorch][INFO] - + Disabling gradients -[2023-08-31 16:49:54,077][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-31 16:49:54,191][pytorch][INFO] - + Turning on eval mode -[2023-08-31 16:49:54,192][inference][INFO] - Running inference benchmark -[2023-08-31 16:49:54,311][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-31 16:49:54,312][inference][INFO] - + Tracking forward 
pass peak memory -[2023-08-31 16:49:54,351][inference][INFO] - + Forward pass peak memory: 467.80006399999996 (MB) -[2023-08-31 16:49:54,352][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-31 16:49:54,353][inference][INFO] - + Warming up the forward pass -[2023-08-31 16:49:54,389][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-31 16:49:59,434][inference][INFO] - + Forward pass latency: 3.56e-03 (s) -[2023-08-31 16:49:59,435][inference][INFO] - + Forward pass throughput: 1120.00 (samples/s) -[2023-08-31 16:49:59,435][inference][INFO] - Saving inference results -[2023-08-31 16:49:59,442][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-31_15:01:27_9c5acca0028b550e1328ba7e2f16418fe0a0c634/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-31_15:01:27_9c5acca0028b550e1328ba7e2f16418fe0a0c634/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_15:01:27_9c5acca0028b550e1328ba7e2f16418fe0a0c634/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-31_15:01:27_9c5acca0028b550e1328ba7e2f16418fe0a0c634/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-31_15:01:27_9c5acca0028b550e1328ba7e2f16418fe0a0c634/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 7ac2a9e42bb5e18251d2c947e0bdbff391931a40..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_15:01:27_9c5acca0028b550e1328ba7e2f16418fe0a0c634/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - 
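
A note on how the numbers in these inference_results.csv files fit together: the reported forward throughput is, up to rounding, batch size divided by forward latency. A minimal sketch, assuming pandas is available and using illustrative relative paths to the two deleted BERT result files above:

```python
# Minimal sketch, assuming pandas is installed; paths are illustrative.
# Checks forward.throughput(samples/s) ~= batch_size / forward.latency(s)
# against the two deleted BERT result files above.
import pandas as pd

runs = [
    (1, "pytorch_bert_inference/0/inference_results.csv"),  # 0.00322 s, 311.0
    (4, "pytorch_bert_inference/1/inference_results.csv"),  # 0.00356 s, 1120.0
]
for batch_size, path in runs:
    df = pd.read_csv(path, index_col=0)
    latency = df["forward.latency(s)"].iloc[0]
    reported = df["forward.throughput(samples/s)"].iloc[0]
    print(f"bs={batch_size}: {batch_size / latency:.0f} vs reported {reported}")
# bs=1: 1 / 0.00322 -> ~311, matching the reported 311.0
# bs=4: 4 / 0.00356 -> ~1124, close to the reported 1120.0 (presumably the
# benchmark derives throughput from its own timed run count, so small
# rounding gaps appear)
```
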
launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-31_15:01:27_9c5acca0028b550e1328ba7e2f16418fe0a0c634/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-31_15:01:27_9c5acca0028b550e1328ba7e2f16418fe0a0c634/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-31_15:01:27_9c5acca0028b550e1328ba7e2f16418fe0a0c634/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_15:01:27_9c5acca0028b550e1328ba7e2f16418fe0a0c634/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-31_15:01:27_9c5acca0028b550e1328ba7e2f16418fe0a0c634/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-31_15:01:27_9c5acca0028b550e1328ba7e2f16418fe0a0c634/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_15:01:27_9c5acca0028b550e1328ba7e2f16418fe0a0c634/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-31_15:01:27_9c5acca0028b550e1328ba7e2f16418fe0a0c634/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-31_15:01:27_9c5acca0028b550e1328ba7e2f16418fe0a0c634/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 952cd45366260c3d26113792e97d1db4c6a13066..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_15:01:27_9c5acca0028b550e1328ba7e2f16418fe0a0c634/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.33196799999996,0.00397,252.0,0.492,203.0 diff --git a/raw_results/2023-08-31_15:01:27_9c5acca0028b550e1328ba7e2f16418fe0a0c634/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-31_15:01:27_9c5acca0028b550e1328ba7e2f16418fe0a0c634/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 4f445847bee3310d845aef8d8af9fc4c632cee25..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-31_15:01:27_9c5acca0028b550e1328ba7e2f16418fe0a0c634/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-31 16:50:04,314][benchmark][INFO] - Configuring inference benchmark -[2023-08-31 16:50:04,316][benchmark][INFO] - + Setting seed(42) -[2023-08-31 16:50:06,175][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-31 16:50:06,176][backend][INFO] - Configuring pytorch backend -[2023-08-31 16:50:06,176][backend][INFO] - + Checking initial device isolation -[2023-08-31 16:50:06,176][backend][INFO] - + Checking contineous device isolation -[2023-08-31 16:50:06,176][pytorch][INFO] - + Disabling gradients -[2023-08-31 16:50:06,176][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-31 16:50:06,884][pytorch][INFO] - + Turning on eval mode -[2023-08-31 16:50:06,885][inference][INFO] - Running inference benchmark -[2023-08-31 16:50:07,102][inference][INFO] - + Tracking forward pass peak memory -[2023-08-31 16:50:07,147][inference][INFO] - + Forward pass peak memory: 469.33196799999996 (MB) -[2023-08-31 16:50:07,148][inference][INFO] - + Warming up the forward pass -[2023-08-31 16:50:07,183][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-31 16:50:12,226][inference][INFO] - + Forward pass latency: 3.97e-03 (s) -[2023-08-31 16:50:12,227][inference][INFO] - + Forward pass throughput: 252.00 (samples/s) -[2023-08-31 16:50:12,228][inference][INFO] - + Warming up the generation pass -[2023-08-31 16:50:12,774][inference][INFO] - + Tracking generation latency and throughput -[2023-08-31 16:50:18,192][inference][INFO] - + Generation pass latency: 4.92e-01 (s) -[2023-08-31 16:50:18,193][inference][INFO] - + Generation pass throughput: 203.00 (tokens/s) -[2023-08-31 16:50:18,193][inference][INFO] - Saving inference results -[2023-08-31 16:50:18,204][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-31_16:00:03_eaf5e98ec03d73c24367438100b05c02ce5ad10c/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-31_16:00:03_eaf5e98ec03d73c24367438100b05c02ce5ad10c/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_16:00:03_eaf5e98ec03d73c24367438100b05c02ce5ad10c/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
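
For the text-generation run above, the generate-pass numbers are consistent with tokens/s = new_tokens × batch_size / generate latency, where new_tokens: 100 and batch_size: 1 come from the hydra_config.yaml. A sketch of this assumed relationship:

```python
# Sketch of the assumed relationship between the generate-pass metrics above.
new_tokens = 100     # benchmark.new_tokens in hydra_config.yaml
batch_size = 1       # benchmark.input_shapes.batch_size
latency_s = 0.492    # generate.latency(s) from inference_results.csv
tokens_per_s = new_tokens * batch_size / latency_s
print(round(tokens_per_s))  # -> 203, the reported generate.throughput(tokens/s)
```
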
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-31_16:00:03_eaf5e98ec03d73c24367438100b05c02ce5ad10c/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-31_16:00:03_eaf5e98ec03d73c24367438100b05c02ce5ad10c/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index a1bce1fade4b9c567ce23398dee3733f089ad068..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_16:00:03_eaf5e98ec03d73c24367438100b05c02ce5ad10c/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-31_16:00:03_eaf5e98ec03d73c24367438100b05c02ce5ad10c/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-31_16:00:03_eaf5e98ec03d73c24367438100b05c02ce5ad10c/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-31_16:00:03_eaf5e98ec03d73c24367438100b05c02ce5ad10c/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_16:00:03_eaf5e98ec03d73c24367438100b05c02ce5ad10c/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-31_16:00:03_eaf5e98ec03d73c24367438100b05c02ce5ad10c/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-31_16:00:03_eaf5e98ec03d73c24367438100b05c02ce5ad10c/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_16:00:03_eaf5e98ec03d73c24367438100b05c02ce5ad10c/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
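
The hydra.yaml above records everything needed to reproduce this sweep: MULTIRUN mode, config name bert_cpu_inference, the batch-size sweep 1,4, and output directories keyed on the COMMIT_DATE_GMT and COMMIT_SHA environment variables. A hypothetical re-invocation — the main.py entrypoint name is assumed from hydra.job.name, and only standard Hydra CLI flags are used:

```python
# Hypothetical re-invocation of the recorded sweep. The entrypoint name
# ("main.py") is assumed from hydra.job.name; --multirun, --config-dir and
# --config-name are standard Hydra CLI flags.
import os
import subprocess

env = dict(
    os.environ,
    COMMIT_DATE_GMT="2023-08-31_16:00:03",                # from the run dir name
    COMMIT_SHA="eaf5e98ec03d73c24367438100b05c02ce5ad10c",
)
subprocess.run(
    [
        "python", "main.py", "--multirun",
        "--config-dir", "configs",
        "--config-name", "bert_cpu_inference",
        "benchmark.input_shapes.batch_size=1,4",          # the recorded sweep
    ],
    env=env,
    check=True,
)
```
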
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-31_16:00:03_eaf5e98ec03d73c24367438100b05c02ce5ad10c/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-31_16:00:03_eaf5e98ec03d73c24367438100b05c02ce5ad10c/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 0f8bd885feb0f8efcd8dd4957573326e07e4e317..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_16:00:03_eaf5e98ec03d73c24367438100b05c02ce5ad10c/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.214336,0.00325,308.0 diff --git a/raw_results/2023-08-31_16:00:03_eaf5e98ec03d73c24367438100b05c02ce5ad10c/pytorch_bert_inference/0/main.log b/raw_results/2023-08-31_16:00:03_eaf5e98ec03d73c24367438100b05c02ce5ad10c/pytorch_bert_inference/0/main.log deleted file mode 100644 index 1a40f5848524eed7f572adcd4c86daeb0abfba02..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_16:00:03_eaf5e98ec03d73c24367438100b05c02ce5ad10c/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-31 16:51:28,399][benchmark][INFO] - Configuring inference benchmark -[2023-08-31 16:51:28,400][benchmark][INFO] - + Setting seed(42) -[2023-08-31 16:51:29,727][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-31 16:51:29,727][backend][INFO] - Configuring pytorch backend -[2023-08-31 16:51:29,727][backend][INFO] - + Checking initial device isolation -[2023-08-31 16:51:29,728][backend][INFO] - + Checking contineous device isolation -[2023-08-31 16:51:29,728][pytorch][INFO] - + Disabling gradients -[2023-08-31 16:51:29,728][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-31 16:51:30,333][pytorch][INFO] - + Turning on eval mode -[2023-08-31 16:51:30,334][inference][INFO] - Running inference benchmark -[2023-08-31 16:51:30,455][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-31 16:51:30,457][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-31 16:51:30,521][inference][INFO] - + Forward pass peak memory: 467.214336 (MB) -[2023-08-31 16:51:30,522][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-31 16:51:30,524][inference][INFO] - + Warming up the forward pass -[2023-08-31 16:51:30,561][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-31 16:51:35,614][inference][INFO] - + Forward pass latency: 3.25e-03 (s) -[2023-08-31 16:51:35,615][inference][INFO] - + Forward pass throughput: 308.00 (samples/s) -[2023-08-31 16:51:35,615][inference][INFO] - Saving inference results -[2023-08-31 16:51:35,624][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-31_16:00:03_eaf5e98ec03d73c24367438100b05c02ce5ad10c/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-31_16:00:03_eaf5e98ec03d73c24367438100b05c02ce5ad10c/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_16:00:03_eaf5e98ec03d73c24367438100b05c02ce5ad10c/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-31_16:00:03_eaf5e98ec03d73c24367438100b05c02ce5ad10c/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-31_16:00:03_eaf5e98ec03d73c24367438100b05c02ce5ad10c/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 8021a126571c0a2be3b24e7fe3327f5280a37003..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_16:00:03_eaf5e98ec03d73c24367438100b05c02ce5ad10c/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-31_16:00:03_eaf5e98ec03d73c24367438100b05c02ce5ad10c/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-31_16:00:03_eaf5e98ec03d73c24367438100b05c02ce5ad10c/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-08-31_16:00:03_eaf5e98ec03d73c24367438100b05c02ce5ad10c/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_16:00:03_eaf5e98ec03d73c24367438100b05c02ce5ad10c/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-31_16:00:03_eaf5e98ec03d73c24367438100b05c02ce5ad10c/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-31_16:00:03_eaf5e98ec03d73c24367438100b05c02ce5ad10c/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_16:00:03_eaf5e98ec03d73c24367438100b05c02ce5ad10c/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-31_16:00:03_eaf5e98ec03d73c24367438100b05c02ce5ad10c/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-31_16:00:03_eaf5e98ec03d73c24367438100b05c02ce5ad10c/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index ca4ac8fff62b72492c1e5e94bea3718370fa90a6..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_16:00:03_eaf5e98ec03d73c24367438100b05c02ce5ad10c/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.226048,0.00503,795.0 diff --git a/raw_results/2023-08-31_16:00:03_eaf5e98ec03d73c24367438100b05c02ce5ad10c/pytorch_bert_inference/1/main.log b/raw_results/2023-08-31_16:00:03_eaf5e98ec03d73c24367438100b05c02ce5ad10c/pytorch_bert_inference/1/main.log deleted file mode 100644 index 1dee5b5c93b84511c6c23b6010fb0caa9861afd5..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-31_16:00:03_eaf5e98ec03d73c24367438100b05c02ce5ad10c/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-31 16:51:35,988][benchmark][INFO] - Configuring inference benchmark -[2023-08-31 16:51:35,989][benchmark][INFO] - + Setting seed(42) -[2023-08-31 16:51:36,424][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-31 16:51:36,425][backend][INFO] - Configuring pytorch backend -[2023-08-31 16:51:36,425][backend][INFO] - + Checking initial device isolation -[2023-08-31 16:51:36,425][backend][INFO] - + Checking contineous device isolation -[2023-08-31 16:51:36,425][pytorch][INFO] - + Disabling gradients -[2023-08-31 16:51:36,425][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-31 16:51:36,553][pytorch][INFO] - + Turning on eval mode -[2023-08-31 16:51:36,553][inference][INFO] - Running inference benchmark -[2023-08-31 16:51:36,674][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-31 16:51:36,675][inference][INFO] - + Tracking forward pass peak memory -[2023-08-31 16:51:36,717][inference][INFO] - + Forward pass peak memory: 468.226048 (MB) -[2023-08-31 16:51:36,718][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-31 16:51:36,719][inference][INFO] - + Warming up the forward pass -[2023-08-31 16:51:36,770][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-31 16:51:41,808][inference][INFO] - + Forward pass latency: 5.03e-03 (s) -[2023-08-31 16:51:41,809][inference][INFO] - + Forward pass throughput: 795.00 (samples/s) -[2023-08-31 16:51:41,809][inference][INFO] - Saving inference results -[2023-08-31 16:51:41,818][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-31_16:00:03_eaf5e98ec03d73c24367438100b05c02ce5ad10c/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-31_16:00:03_eaf5e98ec03d73c24367438100b05c02ce5ad10c/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_16:00:03_eaf5e98ec03d73c24367438100b05c02ce5ad10c/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: 
hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-31_16:00:03_eaf5e98ec03d73c24367438100b05c02ce5ad10c/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-31_16:00:03_eaf5e98ec03d73c24367438100b05c02ce5ad10c/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 68a3bc5f119d25a21a07fabe8b5cfdc8ac3e55af..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_16:00:03_eaf5e98ec03d73c24367438100b05c02ce5ad10c/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
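
Comparing the batch-size-4 BERT runs across the two commits above — forward latency 0.00356 s at 9c5acca0… versus 0.00503 s at eaf5e98e… — shows the kind of shift these raw results exist to surface. A small sketch of the comparison, with values taken from the two deleted CSVs:

```python
# Cross-commit comparison of the bs=4 BERT forward metrics recorded above.
latencies = {
    "9c5acca0": 0.00356,  # raw_results/2023-08-31_15:01:27_.../bert_inference/1
    "eaf5e98e": 0.00503,  # raw_results/2023-08-31_16:00:03_.../bert_inference/1
}
base, new = latencies["9c5acca0"], latencies["eaf5e98e"]
print(f"forward latency change: {100 * (new - base) / base:+.1f}%")   # -> +41.3%
print(f"throughput change: {100 * (795.0 - 1120.0) / 1120.0:+.1f}%")  # -> -29.0%
```
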
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-31_16:00:03_eaf5e98ec03d73c24367438100b05c02ce5ad10c/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-31_16:00:03_eaf5e98ec03d73c24367438100b05c02ce5ad10c/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-31_16:00:03_eaf5e98ec03d73c24367438100b05c02ce5ad10c/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_16:00:03_eaf5e98ec03d73c24367438100b05c02ce5ad10c/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-31_16:00:03_eaf5e98ec03d73c24367438100b05c02ce5ad10c/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-31_16:00:03_eaf5e98ec03d73c24367438100b05c02ce5ad10c/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_16:00:03_eaf5e98ec03d73c24367438100b05c02ce5ad10c/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
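
The job_logging block above is an ordinary Python logging dictConfig (schema version 1) routed through colorlog. A trimmed, colorlog-free sketch that applies the same "simple" console format directly with the standard library:

```python
# Trimmed sketch of the job_logging block above as a plain dictConfig,
# dropping the colorlog formatter and the per-job file handler.
import logging
import logging.config

logging.config.dictConfig({
    "version": 1,
    "formatters": {
        "simple": {"format": "[%(asctime)s][%(name)s][%(levelname)s] - %(message)s"},
    },
    "handlers": {
        "console": {
            "class": "logging.StreamHandler",
            "formatter": "simple",
            "stream": "ext://sys.stdout",
        },
    },
    "root": {"level": "INFO", "handlers": ["console"]},
    "disable_existing_loggers": False,
})
logging.getLogger("inference").info("Running inference benchmark")
```
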
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-31_16:00:03_eaf5e98ec03d73c24367438100b05c02ce5ad10c/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-31_16:00:03_eaf5e98ec03d73c24367438100b05c02ce5ad10c/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 73537c8a0899dd5f03a71d29359c4868a3fe1b48..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_16:00:03_eaf5e98ec03d73c24367438100b05c02ce5ad10c/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.84806399999997,0.00387,258.0,0.548,182.0 diff --git a/raw_results/2023-08-31_16:00:03_eaf5e98ec03d73c24367438100b05c02ce5ad10c/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-31_16:00:03_eaf5e98ec03d73c24367438100b05c02ce5ad10c/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 3ba99e76f45f612732673b59b879927330e28fcd..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_16:00:03_eaf5e98ec03d73c24367438100b05c02ce5ad10c/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-31 16:51:46,565][benchmark][INFO] - Configuring inference benchmark -[2023-08-31 16:51:46,566][benchmark][INFO] - + Setting seed(42) -[2023-08-31 16:51:47,967][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-31 16:51:47,968][backend][INFO] - Configuring pytorch backend -[2023-08-31 16:51:47,968][backend][INFO] - + Checking initial device isolation -[2023-08-31 16:51:47,968][backend][INFO] - + Checking contineous device isolation -[2023-08-31 16:51:47,969][pytorch][INFO] - + Disabling gradients -[2023-08-31 16:51:47,969][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-31 16:51:48,617][pytorch][INFO] - + Turning on eval mode -[2023-08-31 16:51:48,618][inference][INFO] - Running inference benchmark -[2023-08-31 16:51:48,815][inference][INFO] - + Tracking forward pass peak memory -[2023-08-31 16:51:48,863][inference][INFO] - + Forward pass peak memory: 469.84806399999997 (MB) -[2023-08-31 16:51:48,864][inference][INFO] - + Warming up the 
forward pass -[2023-08-31 16:51:48,901][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-31 16:51:53,946][inference][INFO] - + Forward pass latency: 3.87e-03 (s) -[2023-08-31 16:51:53,948][inference][INFO] - + Forward pass throughput: 258.00 (samples/s) -[2023-08-31 16:51:53,949][inference][INFO] - + Warming up the generation pass -[2023-08-31 16:51:54,493][inference][INFO] - + Tracking generation latency and throughput -[2023-08-31 16:51:59,975][inference][INFO] - + Generation pass latency: 5.48e-01 (s) -[2023-08-31 16:51:59,976][inference][INFO] - + Generation pass throughput: 182.00 (tokens/s) -[2023-08-31 16:51:59,977][inference][INFO] - Saving inference results -[2023-08-31 16:51:59,988][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-31_16:54:01_3fb1535b09901db72a41095c007c29bcdf02e3ae/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-31_16:54:01_3fb1535b09901db72a41095c007c29bcdf02e3ae/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_16:54:01_3fb1535b09901db72a41095c007c29bcdf02e3ae/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-31_16:54:01_3fb1535b09901db72a41095c007c29bcdf02e3ae/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-31_16:54:01_3fb1535b09901db72a41095c007c29bcdf02e3ae/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 0f73bffc0b93a04ce4fec7f1f8b83ec731c6ec16..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_16:54:01_3fb1535b09901db72a41095c007c29bcdf02e3ae/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-31_16:54:01_3fb1535b09901db72a41095c007c29bcdf02e3ae/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-31_16:54:01_3fb1535b09901db72a41095c007c29bcdf02e3ae/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-31_16:54:01_3fb1535b09901db72a41095c007c29bcdf02e3ae/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_16:54:01_3fb1535b09901db72a41095c007c29bcdf02e3ae/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-31_16:54:01_3fb1535b09901db72a41095c007c29bcdf02e3ae/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-31_16:54:01_3fb1535b09901db72a41095c007c29bcdf02e3ae/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_16:54:01_3fb1535b09901db72a41095c007c29bcdf02e3ae/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-31_16:54:01_3fb1535b09901db72a41095c007c29bcdf02e3ae/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-31_16:54:01_3fb1535b09901db72a41095c007c29bcdf02e3ae/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 798cc57e63523a1fcc642b5494971dc22629d774..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_16:54:01_3fb1535b09901db72a41095c007c29bcdf02e3ae/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.415616,0.00336,298.0 diff --git a/raw_results/2023-08-31_16:54:01_3fb1535b09901db72a41095c007c29bcdf02e3ae/pytorch_bert_inference/0/main.log b/raw_results/2023-08-31_16:54:01_3fb1535b09901db72a41095c007c29bcdf02e3ae/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
0e99c25510536d9f1351f346caf3b5c8f29f9475..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_16:54:01_3fb1535b09901db72a41095c007c29bcdf02e3ae/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-31 18:50:07,746][benchmark][INFO] - Configuring inference benchmark -[2023-08-31 18:50:07,746][benchmark][INFO] - + Setting seed(42) -[2023-08-31 18:50:09,104][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-31 18:50:09,105][backend][INFO] - Configuring pytorch backend -[2023-08-31 18:50:09,105][backend][INFO] - + Checking initial device isolation -[2023-08-31 18:50:09,105][backend][INFO] - + Checking contineous device isolation -[2023-08-31 18:50:09,105][pytorch][INFO] - + Disabling gradients -[2023-08-31 18:50:09,105][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-31 18:50:09,735][pytorch][INFO] - + Turning on eval mode -[2023-08-31 18:50:09,735][inference][INFO] - Running inference benchmark -[2023-08-31 18:50:09,855][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-31 18:50:09,857][inference][INFO] - + Tracking forward pass peak memory -[2023-08-31 18:50:09,918][inference][INFO] - + Forward pass peak memory: 466.415616 (MB) -[2023-08-31 18:50:09,919][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-31 18:50:09,921][inference][INFO] - + Warming up the forward pass -[2023-08-31 18:50:09,952][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-31 18:50:15,009][inference][INFO] - + Forward pass latency: 3.36e-03 (s) -[2023-08-31 18:50:15,011][inference][INFO] - + Forward pass throughput: 298.00 (samples/s) -[2023-08-31 18:50:15,012][inference][INFO] - Saving inference results -[2023-08-31 18:50:15,027][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-31_16:54:01_3fb1535b09901db72a41095c007c29bcdf02e3ae/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-31_16:54:01_3fb1535b09901db72a41095c007c29bcdf02e3ae/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_16:54:01_3fb1535b09901db72a41095c007c29bcdf02e3ae/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-31_16:54:01_3fb1535b09901db72a41095c007c29bcdf02e3ae/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-31_16:54:01_3fb1535b09901db72a41095c007c29bcdf02e3ae/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 96d6f9c3b9863c543122d04291c8afd9427321d1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_16:54:01_3fb1535b09901db72a41095c007c29bcdf02e3ae/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-31_16:54:01_3fb1535b09901db72a41095c007c29bcdf02e3ae/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-31_16:54:01_3fb1535b09901db72a41095c007c29bcdf02e3ae/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-31_16:54:01_3fb1535b09901db72a41095c007c29bcdf02e3ae/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_16:54:01_3fb1535b09901db72a41095c007c29bcdf02e3ae/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-31_16:54:01_3fb1535b09901db72a41095c007c29bcdf02e3ae/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-31_16:54:01_3fb1535b09901db72a41095c007c29bcdf02e3ae/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_16:54:01_3fb1535b09901db72a41095c007c29bcdf02e3ae/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-31_16:54:01_3fb1535b09901db72a41095c007c29bcdf02e3ae/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-31_16:54:01_3fb1535b09901db72a41095c007c29bcdf02e3ae/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 3ad8b7dd3ac5369440b94ef08590af4dd0f6dcb7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_16:54:01_3fb1535b09901db72a41095c007c29bcdf02e3ae/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.431424,0.0034,1180.0 diff --git a/raw_results/2023-08-31_16:54:01_3fb1535b09901db72a41095c007c29bcdf02e3ae/pytorch_bert_inference/1/main.log b/raw_results/2023-08-31_16:54:01_3fb1535b09901db72a41095c007c29bcdf02e3ae/pytorch_bert_inference/1/main.log deleted file mode 100644 index b2d78a629dec11d83a864716218672cbe516c6d6..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_16:54:01_3fb1535b09901db72a41095c007c29bcdf02e3ae/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-31 18:50:15,598][benchmark][INFO] - Configuring inference benchmark -[2023-08-31 18:50:15,599][benchmark][INFO] - + Setting seed(42) -[2023-08-31 18:50:16,234][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-31 18:50:16,235][backend][INFO] - Configuring pytorch backend -[2023-08-31 18:50:16,235][backend][INFO] - + Checking initial device isolation -[2023-08-31 18:50:16,235][backend][INFO] - + Checking contineous device isolation -[2023-08-31 18:50:16,235][pytorch][INFO] - + Disabling gradients -[2023-08-31 18:50:16,235][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-31 18:50:16,355][pytorch][INFO] - + Turning on eval mode -[2023-08-31 18:50:16,355][inference][INFO] - Running inference benchmark -[2023-08-31 18:50:16,475][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-31 18:50:16,476][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-31 18:50:16,516][inference][INFO] - + Forward pass peak memory: 467.431424 (MB) -[2023-08-31 18:50:16,517][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-31 18:50:16,519][inference][INFO] - + Warming up the forward pass -[2023-08-31 18:50:16,553][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-31 18:50:21,599][inference][INFO] - + Forward pass latency: 3.40e-03 (s) -[2023-08-31 18:50:21,601][inference][INFO] - + Forward pass throughput: 1180.00 (samples/s) -[2023-08-31 18:50:21,601][inference][INFO] - Saving inference results -[2023-08-31 18:50:21,608][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-31_16:54:01_3fb1535b09901db72a41095c007c29bcdf02e3ae/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-31_16:54:01_3fb1535b09901db72a41095c007c29bcdf02e3ae/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_16:54:01_3fb1535b09901db72a41095c007c29bcdf02e3ae/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-31_16:54:01_3fb1535b09901db72a41095c007c29bcdf02e3ae/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-31_16:54:01_3fb1535b09901db72a41095c007c29bcdf02e3ae/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 9dcf8588945361c1f3fdecca35a5c5b4a96e81d2..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_16:54:01_3fb1535b09901db72a41095c007c29bcdf02e3ae/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-31_16:54:01_3fb1535b09901db72a41095c007c29bcdf02e3ae/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-31_16:54:01_3fb1535b09901db72a41095c007c29bcdf02e3ae/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-31_16:54:01_3fb1535b09901db72a41095c007c29bcdf02e3ae/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_16:54:01_3fb1535b09901db72a41095c007c29bcdf02e3ae/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-31_16:54:01_3fb1535b09901db72a41095c007c29bcdf02e3ae/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-31_16:54:01_3fb1535b09901db72a41095c007c29bcdf02e3ae/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_16:54:01_3fb1535b09901db72a41095c007c29bcdf02e3ae/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-31_16:54:01_3fb1535b09901db72a41095c007c29bcdf02e3ae/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-31_16:54:01_3fb1535b09901db72a41095c007c29bcdf02e3ae/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 7fcae038dfb87c8a74abb3f7ef8f6a3716ad13c5..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_16:54:01_3fb1535b09901db72a41095c007c29bcdf02e3ae/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.61459199999996,0.00408,245.0,0.501,200.0 diff --git a/raw_results/2023-08-31_16:54:01_3fb1535b09901db72a41095c007c29bcdf02e3ae/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-31_16:54:01_3fb1535b09901db72a41095c007c29bcdf02e3ae/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 725ee56c0755d61c8119bae282e7b8483e970063..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-31_16:54:01_3fb1535b09901db72a41095c007c29bcdf02e3ae/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-31 18:50:26,490][benchmark][INFO] - Configuring inference benchmark -[2023-08-31 18:50:26,491][benchmark][INFO] - + Setting seed(42) -[2023-08-31 18:50:28,029][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-31 18:50:28,029][backend][INFO] - Configuring pytorch backend -[2023-08-31 18:50:28,030][backend][INFO] - + Checking initial device isolation -[2023-08-31 18:50:28,030][backend][INFO] - + Checking contineous device isolation -[2023-08-31 18:50:28,030][pytorch][INFO] - + Disabling gradients -[2023-08-31 18:50:28,030][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-31 18:50:28,659][pytorch][INFO] - + Turning on eval mode -[2023-08-31 18:50:28,659][inference][INFO] - Running inference benchmark -[2023-08-31 18:50:28,849][inference][INFO] - + Tracking forward pass peak memory -[2023-08-31 18:50:28,895][inference][INFO] - + Forward pass peak memory: 469.61459199999996 (MB) -[2023-08-31 18:50:28,897][inference][INFO] - + Warming up the forward pass -[2023-08-31 18:50:28,939][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-31 18:50:33,987][inference][INFO] - + Forward pass latency: 4.08e-03 (s) -[2023-08-31 18:50:33,989][inference][INFO] - + Forward pass throughput: 245.00 (samples/s) -[2023-08-31 18:50:33,989][inference][INFO] - + Warming up the generation pass -[2023-08-31 18:50:34,581][inference][INFO] - + Tracking generation latency and throughput -[2023-08-31 18:50:39,591][inference][INFO] - + Generation pass latency: 5.01e-01 (s) -[2023-08-31 18:50:39,592][inference][INFO] - + Generation pass throughput: 200.00 (tokens/s) -[2023-08-31 18:50:39,592][inference][INFO] - Saving inference results -[2023-08-31 18:50:39,621][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-31_17:14:18_0f08cd205a440d23e6bf924cddd73ff48e09fe35/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-31_17:14:18_0f08cd205a440d23e6bf924cddd73ff48e09fe35/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_17:14:18_0f08cd205a440d23e6bf924cddd73ff48e09fe35/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-31_17:14:18_0f08cd205a440d23e6bf924cddd73ff48e09fe35/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-31_17:14:18_0f08cd205a440d23e6bf924cddd73ff48e09fe35/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index b87c7f7f40bcec4c431c99b86f8425934344db39..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_17:14:18_0f08cd205a440d23e6bf924cddd73ff48e09fe35/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-31_17:14:18_0f08cd205a440d23e6bf924cddd73ff48e09fe35/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-31_17:14:18_0f08cd205a440d23e6bf924cddd73ff48e09fe35/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-31_17:14:18_0f08cd205a440d23e6bf924cddd73ff48e09fe35/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_17:14:18_0f08cd205a440d23e6bf924cddd73ff48e09fe35/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-31_17:14:18_0f08cd205a440d23e6bf924cddd73ff48e09fe35/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-31_17:14:18_0f08cd205a440d23e6bf924cddd73ff48e09fe35/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_17:14:18_0f08cd205a440d23e6bf924cddd73ff48e09fe35/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-31_17:14:18_0f08cd205a440d23e6bf924cddd73ff48e09fe35/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-31_17:14:18_0f08cd205a440d23e6bf924cddd73ff48e09fe35/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 094687f111d88b2441d1a6ce6f2ac437df07248e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_17:14:18_0f08cd205a440d23e6bf924cddd73ff48e09fe35/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.434944,0.00607,165.0 diff --git a/raw_results/2023-08-31_17:14:18_0f08cd205a440d23e6bf924cddd73ff48e09fe35/pytorch_bert_inference/0/main.log b/raw_results/2023-08-31_17:14:18_0f08cd205a440d23e6bf924cddd73ff48e09fe35/pytorch_bert_inference/0/main.log deleted file mode 100644 index b3c60352db937dfdf4bd9db977afe69cfc8e537c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_17:14:18_0f08cd205a440d23e6bf924cddd73ff48e09fe35/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-31 18:51:53,330][benchmark][INFO] - Configuring inference benchmark -[2023-08-31 18:51:53,331][benchmark][INFO] - + Setting seed(42) -[2023-08-31 18:51:54,573][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-31 18:51:54,573][backend][INFO] - Configuring pytorch backend -[2023-08-31 18:51:54,573][backend][INFO] - + Checking initial device isolation -[2023-08-31 18:51:54,573][backend][INFO] - + Checking contineous device isolation -[2023-08-31 18:51:54,573][pytorch][INFO] - + Disabling gradients -[2023-08-31 18:51:54,574][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-31 18:51:55,194][pytorch][INFO] - + Turning on eval mode -[2023-08-31 18:51:55,194][inference][INFO] - Running inference benchmark -[2023-08-31 18:51:55,316][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-31 18:51:55,318][inference][INFO] - + Tracking forward pass peak 
memory -[2023-08-31 18:51:55,394][inference][INFO] - + Forward pass peak memory: 468.434944 (MB) -[2023-08-31 18:51:55,396][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-31 18:51:55,399][inference][INFO] - + Warming up the forward pass -[2023-08-31 18:51:55,501][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-31 18:52:00,524][inference][INFO] - + Forward pass latency: 6.07e-03 (s) -[2023-08-31 18:52:00,525][inference][INFO] - + Forward pass throughput: 165.00 (samples/s) -[2023-08-31 18:52:00,526][inference][INFO] - Saving inference results -[2023-08-31 18:52:00,538][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-31_17:14:18_0f08cd205a440d23e6bf924cddd73ff48e09fe35/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-31_17:14:18_0f08cd205a440d23e6bf924cddd73ff48e09fe35/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_17:14:18_0f08cd205a440d23e6bf924cddd73ff48e09fe35/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-31_17:14:18_0f08cd205a440d23e6bf924cddd73ff48e09fe35/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-31_17:14:18_0f08cd205a440d23e6bf924cddd73ff48e09fe35/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index cb2991e0dd0df9db9e14c210d829dc8d391be6b4..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_17:14:18_0f08cd205a440d23e6bf924cddd73ff48e09fe35/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-31_17:14:18_0f08cd205a440d23e6bf924cddd73ff48e09fe35/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-31_17:14:18_0f08cd205a440d23e6bf924cddd73ff48e09fe35/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-08-31_17:14:18_0f08cd205a440d23e6bf924cddd73ff48e09fe35/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_17:14:18_0f08cd205a440d23e6bf924cddd73ff48e09fe35/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-31_17:14:18_0f08cd205a440d23e6bf924cddd73ff48e09fe35/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-31_17:14:18_0f08cd205a440d23e6bf924cddd73ff48e09fe35/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_17:14:18_0f08cd205a440d23e6bf924cddd73ff48e09fe35/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-31_17:14:18_0f08cd205a440d23e6bf924cddd73ff48e09fe35/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-31_17:14:18_0f08cd205a440d23e6bf924cddd73ff48e09fe35/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index a3d1c374eb848c3481eb7fa4e78febfa303c597a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_17:14:18_0f08cd205a440d23e6bf924cddd73ff48e09fe35/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,469.46304,0.00499,802.0 diff --git a/raw_results/2023-08-31_17:14:18_0f08cd205a440d23e6bf924cddd73ff48e09fe35/pytorch_bert_inference/1/main.log b/raw_results/2023-08-31_17:14:18_0f08cd205a440d23e6bf924cddd73ff48e09fe35/pytorch_bert_inference/1/main.log deleted file mode 100644 index 4820c95179316d664c7c718c646bf82c7e891914..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-31_17:14:18_0f08cd205a440d23e6bf924cddd73ff48e09fe35/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-31 18:52:00,928][benchmark][INFO] - Configuring inference benchmark -[2023-08-31 18:52:00,929][benchmark][INFO] - + Setting seed(42) -[2023-08-31 18:52:01,359][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-31 18:52:01,360][backend][INFO] - Configuring pytorch backend -[2023-08-31 18:52:01,360][backend][INFO] - + Checking initial device isolation -[2023-08-31 18:52:01,360][backend][INFO] - + Checking contineous device isolation -[2023-08-31 18:52:01,360][pytorch][INFO] - + Disabling gradients -[2023-08-31 18:52:01,360][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-31 18:52:01,482][pytorch][INFO] - + Turning on eval mode -[2023-08-31 18:52:01,483][inference][INFO] - Running inference benchmark -[2023-08-31 18:52:01,601][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-31 18:52:01,602][inference][INFO] - + Tracking forward pass peak memory -[2023-08-31 18:52:01,642][inference][INFO] - + Forward pass peak memory: 469.46304 (MB) -[2023-08-31 18:52:01,643][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-31 18:52:01,645][inference][INFO] - + Warming up the forward pass -[2023-08-31 18:52:01,696][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-31 18:52:06,724][inference][INFO] - + Forward pass latency: 4.99e-03 (s) -[2023-08-31 18:52:06,725][inference][INFO] - + Forward pass throughput: 802.00 (samples/s) -[2023-08-31 18:52:06,725][inference][INFO] - Saving inference results -[2023-08-31 18:52:06,732][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-31_17:14:18_0f08cd205a440d23e6bf924cddd73ff48e09fe35/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-31_17:14:18_0f08cd205a440d23e6bf924cddd73ff48e09fe35/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_17:14:18_0f08cd205a440d23e6bf924cddd73ff48e09fe35/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: 
hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-31_17:14:18_0f08cd205a440d23e6bf924cddd73ff48e09fe35/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-31_17:14:18_0f08cd205a440d23e6bf924cddd73ff48e09fe35/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 4a4155d4ba2c6366db13529d43ebc07c442fc392..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_17:14:18_0f08cd205a440d23e6bf924cddd73ff48e09fe35/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-31_17:14:18_0f08cd205a440d23e6bf924cddd73ff48e09fe35/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-31_17:14:18_0f08cd205a440d23e6bf924cddd73ff48e09fe35/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-08-31_17:14:18_0f08cd205a440d23e6bf924cddd73ff48e09fe35/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_17:14:18_0f08cd205a440d23e6bf924cddd73ff48e09fe35/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-31_17:14:18_0f08cd205a440d23e6bf924cddd73ff48e09fe35/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-31_17:14:18_0f08cd205a440d23e6bf924cddd73ff48e09fe35/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_17:14:18_0f08cd205a440d23e6bf924cddd73ff48e09fe35/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-31_17:14:18_0f08cd205a440d23e6bf924cddd73ff48e09fe35/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-31_17:14:18_0f08cd205a440d23e6bf924cddd73ff48e09fe35/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index d81245dfc1a3f678a4441541e0b97a611d0460ae..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_17:14:18_0f08cd205a440d23e6bf924cddd73ff48e09fe35/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.303296,0.00448,223.0,1.08,92.6 diff --git a/raw_results/2023-08-31_17:14:18_0f08cd205a440d23e6bf924cddd73ff48e09fe35/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-31_17:14:18_0f08cd205a440d23e6bf924cddd73ff48e09fe35/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index f1dc98612078a930240cdb07725ca9e0c9525275..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_17:14:18_0f08cd205a440d23e6bf924cddd73ff48e09fe35/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-31 18:52:11,527][benchmark][INFO] - Configuring inference benchmark -[2023-08-31 18:52:11,528][benchmark][INFO] - + Setting seed(42) -[2023-08-31 18:52:13,041][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-31 18:52:13,041][backend][INFO] - Configuring pytorch backend -[2023-08-31 18:52:13,041][backend][INFO] - + Checking initial device isolation -[2023-08-31 18:52:13,041][backend][INFO] - + Checking contineous device isolation -[2023-08-31 18:52:13,042][pytorch][INFO] - + Disabling gradients -[2023-08-31 18:52:13,042][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-31 18:52:13,688][pytorch][INFO] - + Turning on eval mode -[2023-08-31 18:52:13,689][inference][INFO] - Running inference benchmark -[2023-08-31 18:52:13,901][inference][INFO] - + Tracking forward pass peak memory -[2023-08-31 18:52:13,950][inference][INFO] - + Forward pass peak memory: 469.303296 (MB) -[2023-08-31 18:52:13,952][inference][INFO] - + Warming up the forward pass 
-[2023-08-31 18:52:13,994][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-31 18:52:19,052][inference][INFO] - + Forward pass latency: 4.48e-03 (s) -[2023-08-31 18:52:19,054][inference][INFO] - + Forward pass throughput: 223.00 (samples/s) -[2023-08-31 18:52:19,055][inference][INFO] - + Warming up the generation pass -[2023-08-31 18:52:20,112][inference][INFO] - + Tracking generation latency and throughput -[2023-08-31 18:52:25,493][inference][INFO] - + Generation pass latency: 1.08e+00 (s) -[2023-08-31 18:52:25,495][inference][INFO] - + Generation pass throughput: 92.60 (tokens/s) -[2023-08-31 18:52:25,495][inference][INFO] - Saving inference results -[2023-08-31 18:52:25,512][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-31_21:38:14_ef10dbce5cbc9a8b6a0a90b04378ca96f4023aa1/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-08-31_21:38:14_ef10dbce5cbc9a8b6a0a90b04378ca96f4023aa1/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_21:38:14_ef10dbce5cbc9a8b6a0a90b04378ca96f4023aa1/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-31_21:38:14_ef10dbce5cbc9a8b6a0a90b04378ca96f4023aa1/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-08-31_21:38:14_ef10dbce5cbc9a8b6a0a90b04378ca96f4023aa1/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 0c7bbb11c98c79063e428a69655bf536e2dc6706..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_21:38:14_ef10dbce5cbc9a8b6a0a90b04378ca96f4023aa1/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-31_21:38:14_ef10dbce5cbc9a8b6a0a90b04378ca96f4023aa1/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-08-31_21:38:14_ef10dbce5cbc9a8b6a0a90b04378ca96f4023aa1/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-08-31_21:38:14_ef10dbce5cbc9a8b6a0a90b04378ca96f4023aa1/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_21:38:14_ef10dbce5cbc9a8b6a0a90b04378ca96f4023aa1/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-08-31_21:38:14_ef10dbce5cbc9a8b6a0a90b04378ca96f4023aa1/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-31_21:38:14_ef10dbce5cbc9a8b6a0a90b04378ca96f4023aa1/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_21:38:14_ef10dbce5cbc9a8b6a0a90b04378ca96f4023aa1/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-31_21:38:14_ef10dbce5cbc9a8b6a0a90b04378ca96f4023aa1/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-31_21:38:14_ef10dbce5cbc9a8b6a0a90b04378ca96f4023aa1/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index a723cd6fe1f68fd52e5566ca81dfd8c6d5c43e60..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_21:38:14_ef10dbce5cbc9a8b6a0a90b04378ca96f4023aa1/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.95980799999995,0.00588,170.0 diff --git a/raw_results/2023-08-31_21:38:14_ef10dbce5cbc9a8b6a0a90b04378ca96f4023aa1/pytorch_bert_inference/0/main.log b/raw_results/2023-08-31_21:38:14_ef10dbce5cbc9a8b6a0a90b04378ca96f4023aa1/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
498fa0166b701a718a285ce1d51359e0154a2e3e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_21:38:14_ef10dbce5cbc9a8b6a0a90b04378ca96f4023aa1/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-31 22:50:07,052][benchmark][INFO] - Configuring inference benchmark -[2023-08-31 22:50:07,053][benchmark][INFO] - + Setting seed(42) -[2023-08-31 22:50:08,296][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-31 22:50:08,297][backend][INFO] - Configuring pytorch backend -[2023-08-31 22:50:08,297][backend][INFO] - + Checking initial device isolation -[2023-08-31 22:50:08,297][backend][INFO] - + Checking contineous device isolation -[2023-08-31 22:50:08,297][pytorch][INFO] - + Disabling gradients -[2023-08-31 22:50:08,297][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-31 22:50:08,936][pytorch][INFO] - + Turning on eval mode -[2023-08-31 22:50:08,936][inference][INFO] - Running inference benchmark -[2023-08-31 22:50:09,051][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-31 22:50:09,053][inference][INFO] - + Tracking forward pass peak memory -[2023-08-31 22:50:09,118][inference][INFO] - + Forward pass peak memory: 467.95980799999995 (MB) -[2023-08-31 22:50:09,119][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-31 22:50:09,121][inference][INFO] - + Warming up the forward pass -[2023-08-31 22:50:09,180][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-31 22:50:14,201][inference][INFO] - + Forward pass latency: 5.88e-03 (s) -[2023-08-31 22:50:14,202][inference][INFO] - + Forward pass throughput: 170.00 (samples/s) -[2023-08-31 22:50:14,202][inference][INFO] - Saving inference results -[2023-08-31 22:50:14,212][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-31_21:38:14_ef10dbce5cbc9a8b6a0a90b04378ca96f4023aa1/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-08-31_21:38:14_ef10dbce5cbc9a8b6a0a90b04378ca96f4023aa1/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_21:38:14_ef10dbce5cbc9a8b6a0a90b04378ca96f4023aa1/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-31_21:38:14_ef10dbce5cbc9a8b6a0a90b04378ca96f4023aa1/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-08-31_21:38:14_ef10dbce5cbc9a8b6a0a90b04378ca96f4023aa1/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 284d6a5d0c599703691f0fdc17efec87ceeb2e9b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_21:38:14_ef10dbce5cbc9a8b6a0a90b04378ca96f4023aa1/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-31_21:38:14_ef10dbce5cbc9a8b6a0a90b04378ca96f4023aa1/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-31_21:38:14_ef10dbce5cbc9a8b6a0a90b04378ca96f4023aa1/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-08-31_21:38:14_ef10dbce5cbc9a8b6a0a90b04378ca96f4023aa1/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_21:38:14_ef10dbce5cbc9a8b6a0a90b04378ca96f4023aa1/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-08-31_21:38:14_ef10dbce5cbc9a8b6a0a90b04378ca96f4023aa1/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-08-31_21:38:14_ef10dbce5cbc9a8b6a0a90b04378ca96f4023aa1/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_21:38:14_ef10dbce5cbc9a8b6a0a90b04378ca96f4023aa1/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-31_21:38:14_ef10dbce5cbc9a8b6a0a90b04378ca96f4023aa1/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-08-31_21:38:14_ef10dbce5cbc9a8b6a0a90b04378ca96f4023aa1/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 29de661c04fddff4d3e240bbeeca3c80804012de..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_21:38:14_ef10dbce5cbc9a8b6a0a90b04378ca96f4023aa1/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.99609599999997,0.0049,816.0 diff --git a/raw_results/2023-08-31_21:38:14_ef10dbce5cbc9a8b6a0a90b04378ca96f4023aa1/pytorch_bert_inference/1/main.log b/raw_results/2023-08-31_21:38:14_ef10dbce5cbc9a8b6a0a90b04378ca96f4023aa1/pytorch_bert_inference/1/main.log deleted file mode 100644 index a70ec1d0d9e8b325dbb474a27203a5b72237b4b5..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_21:38:14_ef10dbce5cbc9a8b6a0a90b04378ca96f4023aa1/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-31 22:50:14,597][benchmark][INFO] - Configuring inference benchmark -[2023-08-31 22:50:14,599][benchmark][INFO] - + Setting seed(42) -[2023-08-31 22:50:15,082][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-31 22:50:15,082][backend][INFO] - Configuring pytorch backend -[2023-08-31 22:50:15,082][backend][INFO] - + Checking initial device isolation -[2023-08-31 22:50:15,082][backend][INFO] - + Checking contineous device isolation -[2023-08-31 22:50:15,082][pytorch][INFO] - + Disabling gradients -[2023-08-31 22:50:15,082][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-31 22:50:15,269][pytorch][INFO] - + Turning on eval mode -[2023-08-31 22:50:15,270][inference][INFO] - Running inference benchmark -[2023-08-31 22:50:15,523][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-31 22:50:15,525][inference][INFO] - + Tracking forward 
pass peak memory -[2023-08-31 22:50:15,573][inference][INFO] - + Forward pass peak memory: 468.99609599999997 (MB) -[2023-08-31 22:50:15,574][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-31 22:50:15,575][inference][INFO] - + Warming up the forward pass -[2023-08-31 22:50:15,652][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-31 22:50:20,679][inference][INFO] - + Forward pass latency: 4.90e-03 (s) -[2023-08-31 22:50:20,680][inference][INFO] - + Forward pass throughput: 816.00 (samples/s) -[2023-08-31 22:50:20,680][inference][INFO] - Saving inference results -[2023-08-31 22:50:20,688][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-31_21:38:14_ef10dbce5cbc9a8b6a0a90b04378ca96f4023aa1/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-08-31_21:38:14_ef10dbce5cbc9a8b6a0a90b04378ca96f4023aa1/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_21:38:14_ef10dbce5cbc9a8b6a0a90b04378ca96f4023aa1/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-31_21:38:14_ef10dbce5cbc9a8b6a0a90b04378ca96f4023aa1/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-08-31_21:38:14_ef10dbce5cbc9a8b6a0a90b04378ca96f4023aa1/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 8207d59e49d618a8b1fe80cc0b10ffe0cf68c86d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_21:38:14_ef10dbce5cbc9a8b6a0a90b04378ca96f4023aa1/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - 
launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-08-31_21:38:14_ef10dbce5cbc9a8b6a0a90b04378ca96f4023aa1/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-08-31_21:38:14_ef10dbce5cbc9a8b6a0a90b04378ca96f4023aa1/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-08-31_21:38:14_ef10dbce5cbc9a8b6a0a90b04378ca96f4023aa1/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_21:38:14_ef10dbce5cbc9a8b6a0a90b04378ca96f4023aa1/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-08-31_21:38:14_ef10dbce5cbc9a8b6a0a90b04378ca96f4023aa1/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-31_21:38:14_ef10dbce5cbc9a8b6a0a90b04378ca96f4023aa1/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_21:38:14_ef10dbce5cbc9a8b6a0a90b04378ca96f4023aa1/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-31_21:38:14_ef10dbce5cbc9a8b6a0a90b04378ca96f4023aa1/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-31_21:38:14_ef10dbce5cbc9a8b6a0a90b04378ca96f4023aa1/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index cebf2c7a267b1cf7a3f9617378d562a4a967c21a..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-31_21:38:14_ef10dbce5cbc9a8b6a0a90b04378ca96f4023aa1/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.233664,0.00372,269.0,0.493,203.0 diff --git a/raw_results/2023-08-31_21:38:14_ef10dbce5cbc9a8b6a0a90b04378ca96f4023aa1/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-31_21:38:14_ef10dbce5cbc9a8b6a0a90b04378ca96f4023aa1/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 78bc52849d175d3fe2985d2c64fc27459dc076d6..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-31_21:38:14_ef10dbce5cbc9a8b6a0a90b04378ca96f4023aa1/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-31 22:50:25,438][benchmark][INFO] - Configuring inference benchmark -[2023-08-31 22:50:25,440][benchmark][INFO] - + Setting seed(42) -[2023-08-31 22:50:26,957][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-31 22:50:26,958][backend][INFO] - Configuring pytorch backend -[2023-08-31 22:50:26,958][backend][INFO] - + Checking initial device isolation -[2023-08-31 22:50:26,958][backend][INFO] - + Checking contineous device isolation -[2023-08-31 22:50:26,958][pytorch][INFO] - + Disabling gradients -[2023-08-31 22:50:26,959][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-31 22:50:27,606][pytorch][INFO] - + Turning on eval mode -[2023-08-31 22:50:27,606][inference][INFO] - Running inference benchmark -[2023-08-31 22:50:27,812][inference][INFO] - + Tracking forward pass peak memory -[2023-08-31 22:50:27,860][inference][INFO] - + Forward pass peak memory: 469.233664 (MB) -[2023-08-31 22:50:27,861][inference][INFO] - + Warming up the forward pass -[2023-08-31 22:50:27,893][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-31 22:50:32,938][inference][INFO] - + Forward pass latency: 3.72e-03 (s) -[2023-08-31 22:50:32,940][inference][INFO] - + Forward pass throughput: 269.00 (samples/s) -[2023-08-31 22:50:32,941][inference][INFO] - + Warming up the generation pass -[2023-08-31 22:50:33,531][inference][INFO] - + Tracking generation latency and throughput -[2023-08-31 22:50:38,959][inference][INFO] - + Generation pass latency: 4.93e-01 (s) -[2023-08-31 22:50:38,960][inference][INFO] - + Generation pass throughput: 203.00 (tokens/s) -[2023-08-31 22:50:38,960][inference][INFO] - Saving inference results -[2023-08-31 22:50:38,972][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-01_09:50:06_4ece3b9433ea0bedff0d64fe00623c35766d7d44/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-01_09:50:06_4ece3b9433ea0bedff0d64fe00623c35766d7d44/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_09:50:06_4ece3b9433ea0bedff0d64fe00623c35766d7d44/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-01_09:50:06_4ece3b9433ea0bedff0d64fe00623c35766d7d44/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-01_09:50:06_4ece3b9433ea0bedff0d64fe00623c35766d7d44/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 34032a65fd37e42c020bee19e59dbf2b63fe1259..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_09:50:06_4ece3b9433ea0bedff0d64fe00623c35766d7d44/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-01_09:50:06_4ece3b9433ea0bedff0d64fe00623c35766d7d44/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-01_09:50:06_4ece3b9433ea0bedff0d64fe00623c35766d7d44/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-01_09:50:06_4ece3b9433ea0bedff0d64fe00623c35766d7d44/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_09:50:06_4ece3b9433ea0bedff0d64fe00623c35766d7d44/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-01_09:50:06_4ece3b9433ea0bedff0d64fe00623c35766d7d44/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-01_09:50:06_4ece3b9433ea0bedff0d64fe00623c35766d7d44/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_09:50:06_4ece3b9433ea0bedff0d64fe00623c35766d7d44/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-01_09:50:06_4ece3b9433ea0bedff0d64fe00623c35766d7d44/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-01_09:50:06_4ece3b9433ea0bedff0d64fe00623c35766d7d44/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index b543224233053c99d7b033be870e98a2eaf77349..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_09:50:06_4ece3b9433ea0bedff0d64fe00623c35766d7d44/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.53324799999996,0.00598,167.0 diff --git a/raw_results/2023-09-01_09:50:06_4ece3b9433ea0bedff0d64fe00623c35766d7d44/pytorch_bert_inference/0/main.log b/raw_results/2023-09-01_09:50:06_4ece3b9433ea0bedff0d64fe00623c35766d7d44/pytorch_bert_inference/0/main.log deleted file mode 100644 index ee33a47b4905a6a726d4c7d8d969b4f50b5345fe..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_09:50:06_4ece3b9433ea0bedff0d64fe00623c35766d7d44/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-01 10:50:07,181][benchmark][INFO] - Configuring inference benchmark -[2023-09-01 10:50:07,183][benchmark][INFO] - + Setting seed(42) -[2023-09-01 10:50:08,372][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-01 10:50:08,372][backend][INFO] - Configuring pytorch backend -[2023-09-01 10:50:08,372][backend][INFO] - + Checking initial device isolation -[2023-09-01 10:50:08,372][backend][INFO] - + Checking contineous device isolation -[2023-09-01 10:50:08,373][pytorch][INFO] - + Disabling gradients -[2023-09-01 10:50:08,373][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-01 10:50:09,000][pytorch][INFO] - + Turning on eval mode -[2023-09-01 10:50:09,001][inference][INFO] - Running inference benchmark -[2023-09-01 10:50:09,119][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-01 10:50:09,120][inference][INFO] - + Tracking forward 
pass peak memory -[2023-09-01 10:50:09,182][inference][INFO] - + Forward pass peak memory: 468.53324799999996 (MB) -[2023-09-01 10:50:09,183][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-01 10:50:09,184][inference][INFO] - + Warming up the forward pass -[2023-09-01 10:50:09,258][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-01 10:50:14,285][inference][INFO] - + Forward pass latency: 5.98e-03 (s) -[2023-09-01 10:50:14,286][inference][INFO] - + Forward pass throughput: 167.00 (samples/s) -[2023-09-01 10:50:14,286][inference][INFO] - Saving inference results -[2023-09-01 10:50:14,297][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-01_09:50:06_4ece3b9433ea0bedff0d64fe00623c35766d7d44/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-01_09:50:06_4ece3b9433ea0bedff0d64fe00623c35766d7d44/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_09:50:06_4ece3b9433ea0bedff0d64fe00623c35766d7d44/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-01_09:50:06_4ece3b9433ea0bedff0d64fe00623c35766d7d44/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-01_09:50:06_4ece3b9433ea0bedff0d64fe00623c35766d7d44/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index d2cf51dffdeccff91055537d16fdc0a7ecb9538e..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_09:50:06_4ece3b9433ea0bedff0d64fe00623c35766d7d44/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} 
- launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-01_09:50:06_4ece3b9433ea0bedff0d64fe00623c35766d7d44/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-09-01_09:50:06_4ece3b9433ea0bedff0d64fe00623c35766d7d44/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-09-01_09:50:06_4ece3b9433ea0bedff0d64fe00623c35766d7d44/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_09:50:06_4ece3b9433ea0bedff0d64fe00623c35766d7d44/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-01_09:50:06_4ece3b9433ea0bedff0d64fe00623c35766d7d44/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-01_09:50:06_4ece3b9433ea0bedff0d64fe00623c35766d7d44/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_09:50:06_4ece3b9433ea0bedff0d64fe00623c35766d7d44/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-01_09:50:06_4ece3b9433ea0bedff0d64fe00623c35766d7d44/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-01_09:50:06_4ece3b9433ea0bedff0d64fe00623c35766d7d44/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index f1462551939e219080ad0333f756fa9268af97dd..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_09:50:06_4ece3b9433ea0bedff0d64fe00623c35766d7d44/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,469.55315199999995,0.00511,783.0 diff --git a/raw_results/2023-09-01_09:50:06_4ece3b9433ea0bedff0d64fe00623c35766d7d44/pytorch_bert_inference/1/main.log b/raw_results/2023-09-01_09:50:06_4ece3b9433ea0bedff0d64fe00623c35766d7d44/pytorch_bert_inference/1/main.log deleted file mode 100644 index 
1bf449b5f41bf64a2c7cb5cf018ffef1a68ea32c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_09:50:06_4ece3b9433ea0bedff0d64fe00623c35766d7d44/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-01 10:50:14,679][benchmark][INFO] - Configuring inference benchmark -[2023-09-01 10:50:14,680][benchmark][INFO] - + Setting seed(42) -[2023-09-01 10:50:15,100][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-01 10:50:15,101][backend][INFO] - Configuring pytorch backend -[2023-09-01 10:50:15,101][backend][INFO] - + Checking initial device isolation -[2023-09-01 10:50:15,101][backend][INFO] - + Checking contineous device isolation -[2023-09-01 10:50:15,101][pytorch][INFO] - + Disabling gradients -[2023-09-01 10:50:15,101][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-01 10:50:15,213][pytorch][INFO] - + Turning on eval mode -[2023-09-01 10:50:15,213][inference][INFO] - Running inference benchmark -[2023-09-01 10:50:15,399][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-01 10:50:15,400][inference][INFO] - + Tracking forward pass peak memory -[2023-09-01 10:50:15,443][inference][INFO] - + Forward pass peak memory: 469.55315199999995 (MB) -[2023-09-01 10:50:15,444][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-01 10:50:15,445][inference][INFO] - + Warming up the forward pass -[2023-09-01 10:50:15,507][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-01 10:50:20,535][inference][INFO] - + Forward pass latency: 5.11e-03 (s) -[2023-09-01 10:50:20,536][inference][INFO] - + Forward pass throughput: 783.00 (samples/s) -[2023-09-01 10:50:20,536][inference][INFO] - Saving inference results -[2023-09-01 10:50:20,543][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-01_09:50:06_4ece3b9433ea0bedff0d64fe00623c35766d7d44/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-01_09:50:06_4ece3b9433ea0bedff0d64fe00623c35766d7d44/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_09:50:06_4ece3b9433ea0bedff0d64fe00623c35766d7d44/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-01_09:50:06_4ece3b9433ea0bedff0d64fe00623c35766d7d44/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-01_09:50:06_4ece3b9433ea0bedff0d64fe00623c35766d7d44/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 023995793bc52ae717bd436da5d6216ac5afeee1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_09:50:06_4ece3b9433ea0bedff0d64fe00623c35766d7d44/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-01_09:50:06_4ece3b9433ea0bedff0d64fe00623c35766d7d44/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-01_09:50:06_4ece3b9433ea0bedff0d64fe00623c35766d7d44/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-09-01_09:50:06_4ece3b9433ea0bedff0d64fe00623c35766d7d44/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_09:50:06_4ece3b9433ea0bedff0d64fe00623c35766d7d44/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-01_09:50:06_4ece3b9433ea0bedff0d64fe00623c35766d7d44/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-01_09:50:06_4ece3b9433ea0bedff0d64fe00623c35766d7d44/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_09:50:06_4ece3b9433ea0bedff0d64fe00623c35766d7d44/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-01_09:50:06_4ece3b9433ea0bedff0d64fe00623c35766d7d44/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-01_09:50:06_4ece3b9433ea0bedff0d64fe00623c35766d7d44/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index cfb0d658bd03423feca3574a7ece77f9fb29738d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_09:50:06_4ece3b9433ea0bedff0d64fe00623c35766d7d44/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,468.86911999999995,0.0044,227.0,0.513,195.0 diff --git a/raw_results/2023-09-01_09:50:06_4ece3b9433ea0bedff0d64fe00623c35766d7d44/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-01_09:50:06_4ece3b9433ea0bedff0d64fe00623c35766d7d44/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 4978bc3ff85d4fb73717a4844f3374a75c32a62a..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_09:50:06_4ece3b9433ea0bedff0d64fe00623c35766d7d44/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-01 10:50:25,392][benchmark][INFO] - Configuring inference benchmark -[2023-09-01 10:50:25,393][benchmark][INFO] - + Setting seed(42) -[2023-09-01 10:50:26,762][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-01 10:50:26,763][backend][INFO] - Configuring pytorch backend -[2023-09-01 10:50:26,763][backend][INFO] - + Checking initial device isolation -[2023-09-01 10:50:26,763][backend][INFO] - + Checking contineous device isolation -[2023-09-01 10:50:26,763][pytorch][INFO] - + Disabling gradients -[2023-09-01 10:50:26,764][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-01 10:50:27,405][pytorch][INFO] - + Turning on eval mode -[2023-09-01 10:50:27,406][inference][INFO] - Running inference benchmark -[2023-09-01 10:50:27,599][inference][INFO] - + Tracking forward pass peak memory -[2023-09-01 10:50:27,644][inference][INFO] - + Forward pass peak memory: 468.86911999999995 (MB) -[2023-09-01 10:50:27,645][inference][INFO] - + Warming up the 
forward pass -[2023-09-01 10:50:27,686][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-01 10:50:32,731][inference][INFO] - + Forward pass latency: 4.40e-03 (s) -[2023-09-01 10:50:32,733][inference][INFO] - + Forward pass throughput: 227.00 (samples/s) -[2023-09-01 10:50:32,734][inference][INFO] - + Warming up the generation pass -[2023-09-01 10:50:33,294][inference][INFO] - + Tracking generation latency and throughput -[2023-09-01 10:50:38,424][inference][INFO] - + Generation pass latency: 5.13e-01 (s) -[2023-09-01 10:50:38,425][inference][INFO] - + Generation pass throughput: 195.00 (tokens/s) -[2023-09-01 10:50:38,425][inference][INFO] - Saving inference results -[2023-09-01 10:50:38,436][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-01_09:50:42_024acd271b60568bba214901a9e71d67c44353dc/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-01_09:50:42_024acd271b60568bba214901a9e71d67c44353dc/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_09:50:42_024acd271b60568bba214901a9e71d67c44353dc/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-01_09:50:42_024acd271b60568bba214901a9e71d67c44353dc/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-01_09:50:42_024acd271b60568bba214901a9e71d67c44353dc/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 47dea986417ad7a9827fa6331921afb46c61365c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_09:50:42_024acd271b60568bba214901a9e71d67c44353dc/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-01_09:50:42_024acd271b60568bba214901a9e71d67c44353dc/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-09-01_09:50:42_024acd271b60568bba214901a9e71d67c44353dc/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-01_09:50:42_024acd271b60568bba214901a9e71d67c44353dc/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_09:50:42_024acd271b60568bba214901a9e71d67c44353dc/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-01_09:50:42_024acd271b60568bba214901a9e71d67c44353dc/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-01_09:50:42_024acd271b60568bba214901a9e71d67c44353dc/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_09:50:42_024acd271b60568bba214901a9e71d67c44353dc/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-01_09:50:42_024acd271b60568bba214901a9e71d67c44353dc/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-01_09:50:42_024acd271b60568bba214901a9e71d67c44353dc/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 4b0a07ffb75f667997480257cc7f6a4917c3f598..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_09:50:42_024acd271b60568bba214901a9e71d67c44353dc/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.795392,0.00602,166.0 diff --git a/raw_results/2023-09-01_09:50:42_024acd271b60568bba214901a9e71d67c44353dc/pytorch_bert_inference/0/main.log b/raw_results/2023-09-01_09:50:42_024acd271b60568bba214901a9e71d67c44353dc/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
9bb288d7df381eadb899381c298ca0be75f80c66..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_09:50:42_024acd271b60568bba214901a9e71d67c44353dc/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-01 10:51:49,240][benchmark][INFO] - Configuring inference benchmark -[2023-09-01 10:51:49,241][benchmark][INFO] - + Setting seed(42) -[2023-09-01 10:51:50,590][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-01 10:51:50,590][backend][INFO] - Configuring pytorch backend -[2023-09-01 10:51:50,590][backend][INFO] - + Checking initial device isolation -[2023-09-01 10:51:50,590][backend][INFO] - + Checking contineous device isolation -[2023-09-01 10:51:50,591][pytorch][INFO] - + Disabling gradients -[2023-09-01 10:51:50,591][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-01 10:51:51,256][pytorch][INFO] - + Turning on eval mode -[2023-09-01 10:51:51,257][inference][INFO] - Running inference benchmark -[2023-09-01 10:51:51,375][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-01 10:51:51,376][inference][INFO] - + Tracking forward pass peak memory -[2023-09-01 10:51:51,437][inference][INFO] - + Forward pass peak memory: 468.795392 (MB) -[2023-09-01 10:51:51,438][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-01 10:51:51,439][inference][INFO] - + Warming up the forward pass -[2023-09-01 10:51:51,512][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-01 10:51:56,538][inference][INFO] - + Forward pass latency: 6.02e-03 (s) -[2023-09-01 10:51:56,540][inference][INFO] - + Forward pass throughput: 166.00 (samples/s) -[2023-09-01 10:51:56,540][inference][INFO] - Saving inference results -[2023-09-01 10:51:56,551][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-01_09:50:42_024acd271b60568bba214901a9e71d67c44353dc/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-01_09:50:42_024acd271b60568bba214901a9e71d67c44353dc/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_09:50:42_024acd271b60568bba214901a9e71d67c44353dc/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-01_09:50:42_024acd271b60568bba214901a9e71d67c44353dc/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-01_09:50:42_024acd271b60568bba214901a9e71d67c44353dc/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index bdd0ab0c462dd37008e817bf0177be599a4b885d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_09:50:42_024acd271b60568bba214901a9e71d67c44353dc/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-01_09:50:42_024acd271b60568bba214901a9e71d67c44353dc/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-01_09:50:42_024acd271b60568bba214901a9e71d67c44353dc/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-09-01_09:50:42_024acd271b60568bba214901a9e71d67c44353dc/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_09:50:42_024acd271b60568bba214901a9e71d67c44353dc/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-01_09:50:42_024acd271b60568bba214901a9e71d67c44353dc/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-01_09:50:42_024acd271b60568bba214901a9e71d67c44353dc/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_09:50:42_024acd271b60568bba214901a9e71d67c44353dc/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-01_09:50:42_024acd271b60568bba214901a9e71d67c44353dc/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-01_09:50:42_024acd271b60568bba214901a9e71d67c44353dc/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 3ad64134376e1fe22313b68401b269d151233de5..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_09:50:42_024acd271b60568bba214901a9e71d67c44353dc/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,469.83168,0.00499,802.0 diff --git a/raw_results/2023-09-01_09:50:42_024acd271b60568bba214901a9e71d67c44353dc/pytorch_bert_inference/1/main.log b/raw_results/2023-09-01_09:50:42_024acd271b60568bba214901a9e71d67c44353dc/pytorch_bert_inference/1/main.log deleted file mode 100644 index 16210b52e8528d07374236a012d68659bf606f26..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_09:50:42_024acd271b60568bba214901a9e71d67c44353dc/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-01 10:51:56,931][benchmark][INFO] - Configuring inference benchmark -[2023-09-01 10:51:56,932][benchmark][INFO] - + Setting seed(42) -[2023-09-01 10:51:57,504][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-01 10:51:57,504][backend][INFO] - Configuring pytorch backend -[2023-09-01 10:51:57,504][backend][INFO] - + Checking initial device isolation -[2023-09-01 10:51:57,504][backend][INFO] - + Checking contineous device isolation -[2023-09-01 10:51:57,505][pytorch][INFO] - + Disabling gradients -[2023-09-01 10:51:57,505][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-01 10:51:57,623][pytorch][INFO] - + Turning on eval mode -[2023-09-01 10:51:57,624][inference][INFO] - Running inference benchmark -[2023-09-01 10:51:57,747][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-01 10:51:57,748][inference][INFO] - + Tracking forward pass peak 
memory -[2023-09-01 10:51:57,792][inference][INFO] - + Forward pass peak memory: 469.83168 (MB) -[2023-09-01 10:51:57,793][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-01 10:51:57,794][inference][INFO] - + Warming up the forward pass -[2023-09-01 10:51:57,850][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-01 10:52:02,877][inference][INFO] - + Forward pass latency: 4.99e-03 (s) -[2023-09-01 10:52:02,878][inference][INFO] - + Forward pass throughput: 802.00 (samples/s) -[2023-09-01 10:52:02,878][inference][INFO] - Saving inference results -[2023-09-01 10:52:02,889][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-01_09:50:42_024acd271b60568bba214901a9e71d67c44353dc/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-01_09:50:42_024acd271b60568bba214901a9e71d67c44353dc/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_09:50:42_024acd271b60568bba214901a9e71d67c44353dc/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-01_09:50:42_024acd271b60568bba214901a9e71d67c44353dc/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-01_09:50:42_024acd271b60568bba214901a9e71d67c44353dc/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index b833ed39803bf92f1420c50888c31b3a445b8ead..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_09:50:42_024acd271b60568bba214901a9e71d67c44353dc/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-01_09:50:42_024acd271b60568bba214901a9e71d67c44353dc/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-01_09:50:42_024acd271b60568bba214901a9e71d67c44353dc/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-09-01_09:50:42_024acd271b60568bba214901a9e71d67c44353dc/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_09:50:42_024acd271b60568bba214901a9e71d67c44353dc/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-01_09:50:42_024acd271b60568bba214901a9e71d67c44353dc/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-01_09:50:42_024acd271b60568bba214901a9e71d67c44353dc/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_09:50:42_024acd271b60568bba214901a9e71d67c44353dc/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-01_09:50:42_024acd271b60568bba214901a9e71d67c44353dc/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-01_09:50:42_024acd271b60568bba214901a9e71d67c44353dc/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 5c3a42b8031df99084e0e878cd985f64db1bc06e..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_09:50:42_024acd271b60568bba214901a9e71d67c44353dc/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.712896,0.00322,311.0,0.494,202.0 diff --git a/raw_results/2023-09-01_09:50:42_024acd271b60568bba214901a9e71d67c44353dc/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-01_09:50:42_024acd271b60568bba214901a9e71d67c44353dc/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index f0603140cf8d2a85554572e4f035578c49fd4a64..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-09-01_09:50:42_024acd271b60568bba214901a9e71d67c44353dc/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-01 10:52:07,803][benchmark][INFO] - Configuring inference benchmark -[2023-09-01 10:52:07,804][benchmark][INFO] - + Setting seed(42) -[2023-09-01 10:52:09,290][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-01 10:52:09,290][backend][INFO] - Configuring pytorch backend -[2023-09-01 10:52:09,291][backend][INFO] - + Checking initial device isolation -[2023-09-01 10:52:09,291][backend][INFO] - + Checking contineous device isolation -[2023-09-01 10:52:09,291][pytorch][INFO] - + Disabling gradients -[2023-09-01 10:52:09,291][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-01 10:52:09,993][pytorch][INFO] - + Turning on eval mode -[2023-09-01 10:52:09,994][inference][INFO] - Running inference benchmark -[2023-09-01 10:52:10,296][inference][INFO] - + Tracking forward pass peak memory -[2023-09-01 10:52:10,347][inference][INFO] - + Forward pass peak memory: 469.712896 (MB) -[2023-09-01 10:52:10,348][inference][INFO] - + Warming up the forward pass -[2023-09-01 10:52:10,386][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-01 10:52:15,439][inference][INFO] - + Forward pass latency: 3.22e-03 (s) -[2023-09-01 10:52:15,441][inference][INFO] - + Forward pass throughput: 311.00 (samples/s) -[2023-09-01 10:52:15,442][inference][INFO] - + Warming up the generation pass -[2023-09-01 10:52:15,938][inference][INFO] - + Tracking generation latency and throughput -[2023-09-01 10:52:21,375][inference][INFO] - + Generation pass latency: 4.94e-01 (s) -[2023-09-01 10:52:21,376][inference][INFO] - + Generation pass throughput: 202.00 (tokens/s) -[2023-09-01 10:52:21,376][inference][INFO] - Saving inference results -[2023-09-01 10:52:21,411][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-01_11:05:53_53e2fd785b2792e20f13189d30d1d4ef7d9cf673/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-01_11:05:53_53e2fd785b2792e20f13189d30d1d4ef7d9cf673/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_11:05:53_53e2fd785b2792e20f13189d30d1d4ef7d9cf673/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-01_11:05:53_53e2fd785b2792e20f13189d30d1d4ef7d9cf673/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-01_11:05:53_53e2fd785b2792e20f13189d30d1d4ef7d9cf673/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 5ea4688a7392a7b72913c4f49ecece7fef2b4ae0..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_11:05:53_53e2fd785b2792e20f13189d30d1d4ef7d9cf673/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-01_11:05:53_53e2fd785b2792e20f13189d30d1d4ef7d9cf673/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-01_11:05:53_53e2fd785b2792e20f13189d30d1d4ef7d9cf673/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-01_11:05:53_53e2fd785b2792e20f13189d30d1d4ef7d9cf673/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_11:05:53_53e2fd785b2792e20f13189d30d1d4ef7d9cf673/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-01_11:05:53_53e2fd785b2792e20f13189d30d1d4ef7d9cf673/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-01_11:05:53_53e2fd785b2792e20f13189d30d1d4ef7d9cf673/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_11:05:53_53e2fd785b2792e20f13189d30d1d4ef7d9cf673/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-01_11:05:53_53e2fd785b2792e20f13189d30d1d4ef7d9cf673/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-01_11:05:53_53e2fd785b2792e20f13189d30d1d4ef7d9cf673/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 18100346eb725e5bc29d73c117afd92255e051af..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_11:05:53_53e2fd785b2792e20f13189d30d1d4ef7d9cf673/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.103168,0.00603,166.0 diff --git a/raw_results/2023-09-01_11:05:53_53e2fd785b2792e20f13189d30d1d4ef7d9cf673/pytorch_bert_inference/0/main.log b/raw_results/2023-09-01_11:05:53_53e2fd785b2792e20f13189d30d1d4ef7d9cf673/pytorch_bert_inference/0/main.log deleted file mode 100644 index f9143cdcb69e3dbab6ddcfea48b4f1a1aefda540..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_11:05:53_53e2fd785b2792e20f13189d30d1d4ef7d9cf673/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-01 12:58:39,062][benchmark][INFO] - Configuring inference benchmark -[2023-09-01 12:58:39,063][benchmark][INFO] - + Setting seed(42) -[2023-09-01 12:58:40,328][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-01 12:58:40,328][backend][INFO] - Configuring pytorch backend -[2023-09-01 12:58:40,329][backend][INFO] - + Checking initial device isolation -[2023-09-01 12:58:40,329][backend][INFO] - + Checking contineous device isolation -[2023-09-01 12:58:40,329][pytorch][INFO] - + Disabling gradients -[2023-09-01 12:58:40,329][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-01 12:58:40,957][pytorch][INFO] - + Turning on eval mode -[2023-09-01 12:58:40,958][inference][INFO] - Running inference benchmark -[2023-09-01 12:58:41,081][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-01 12:58:41,083][inference][INFO] - + Tracking forward pass peak 
memory -[2023-09-01 12:58:41,148][inference][INFO] - + Forward pass peak memory: 468.103168 (MB) -[2023-09-01 12:58:41,149][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-01 12:58:41,151][inference][INFO] - + Warming up the forward pass -[2023-09-01 12:58:41,211][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-01 12:58:46,237][inference][INFO] - + Forward pass latency: 6.03e-03 (s) -[2023-09-01 12:58:46,238][inference][INFO] - + Forward pass throughput: 166.00 (samples/s) -[2023-09-01 12:58:46,238][inference][INFO] - Saving inference results -[2023-09-01 12:58:46,249][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-01_11:05:53_53e2fd785b2792e20f13189d30d1d4ef7d9cf673/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-01_11:05:53_53e2fd785b2792e20f13189d30d1d4ef7d9cf673/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_11:05:53_53e2fd785b2792e20f13189d30d1d4ef7d9cf673/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-01_11:05:53_53e2fd785b2792e20f13189d30d1d4ef7d9cf673/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-01_11:05:53_53e2fd785b2792e20f13189d30d1d4ef7d9cf673/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 95472535bfc0a68256475237580a43e0ccb53def..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_11:05:53_53e2fd785b2792e20f13189d30d1d4ef7d9cf673/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-01_11:05:53_53e2fd785b2792e20f13189d30d1d4ef7d9cf673/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-01_11:05:53_53e2fd785b2792e20f13189d30d1d4ef7d9cf673/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-09-01_11:05:53_53e2fd785b2792e20f13189d30d1d4ef7d9cf673/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_11:05:53_53e2fd785b2792e20f13189d30d1d4ef7d9cf673/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-01_11:05:53_53e2fd785b2792e20f13189d30d1d4ef7d9cf673/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-01_11:05:53_53e2fd785b2792e20f13189d30d1d4ef7d9cf673/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_11:05:53_53e2fd785b2792e20f13189d30d1d4ef7d9cf673/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-01_11:05:53_53e2fd785b2792e20f13189d30d1d4ef7d9cf673/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-01_11:05:53_53e2fd785b2792e20f13189d30d1d4ef7d9cf673/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 77e42a85fc612cfc8654b4d9f651902cbe4ec88c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_11:05:53_53e2fd785b2792e20f13189d30d1d4ef7d9cf673/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,469.143552,0.00507,789.0 diff --git a/raw_results/2023-09-01_11:05:53_53e2fd785b2792e20f13189d30d1d4ef7d9cf673/pytorch_bert_inference/1/main.log b/raw_results/2023-09-01_11:05:53_53e2fd785b2792e20f13189d30d1d4ef7d9cf673/pytorch_bert_inference/1/main.log deleted file mode 100644 index f7a3996641af1c755718269ba8327aee89cad313..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-09-01_11:05:53_53e2fd785b2792e20f13189d30d1d4ef7d9cf673/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-01 12:58:46,640][benchmark][INFO] - Configuring inference benchmark -[2023-09-01 12:58:46,642][benchmark][INFO] - + Setting seed(42) -[2023-09-01 12:58:47,085][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-01 12:58:47,085][backend][INFO] - Configuring pytorch backend -[2023-09-01 12:58:47,086][backend][INFO] - + Checking initial device isolation -[2023-09-01 12:58:47,086][backend][INFO] - + Checking contineous device isolation -[2023-09-01 12:58:47,086][pytorch][INFO] - + Disabling gradients -[2023-09-01 12:58:47,086][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-01 12:58:47,200][pytorch][INFO] - + Turning on eval mode -[2023-09-01 12:58:47,200][inference][INFO] - Running inference benchmark -[2023-09-01 12:58:47,325][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-01 12:58:47,326][inference][INFO] - + Tracking forward pass peak memory -[2023-09-01 12:58:47,369][inference][INFO] - + Forward pass peak memory: 469.143552 (MB) -[2023-09-01 12:58:47,370][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-01 12:58:47,371][inference][INFO] - + Warming up the forward pass -[2023-09-01 12:58:47,425][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-01 12:58:52,452][inference][INFO] - + Forward pass latency: 5.07e-03 (s) -[2023-09-01 12:58:52,453][inference][INFO] - + Forward pass throughput: 789.00 (samples/s) -[2023-09-01 12:58:52,453][inference][INFO] - Saving inference results -[2023-09-01 12:58:52,459][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-01_11:05:53_53e2fd785b2792e20f13189d30d1d4ef7d9cf673/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-01_11:05:53_53e2fd785b2792e20f13189d30d1d4ef7d9cf673/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_11:05:53_53e2fd785b2792e20f13189d30d1d4ef7d9cf673/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: 
hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-01_11:05:53_53e2fd785b2792e20f13189d30d1d4ef7d9cf673/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-01_11:05:53_53e2fd785b2792e20f13189d30d1d4ef7d9cf673/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index c14cc653264e2e5506452cfef0e50e90bb4effbe..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_11:05:53_53e2fd785b2792e20f13189d30d1d4ef7d9cf673/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-01_11:05:53_53e2fd785b2792e20f13189d30d1d4ef7d9cf673/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-01_11:05:53_53e2fd785b2792e20f13189d30d1d4ef7d9cf673/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-09-01_11:05:53_53e2fd785b2792e20f13189d30d1d4ef7d9cf673/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_11:05:53_53e2fd785b2792e20f13189d30d1d4ef7d9cf673/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-01_11:05:53_53e2fd785b2792e20f13189d30d1d4ef7d9cf673/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-01_11:05:53_53e2fd785b2792e20f13189d30d1d4ef7d9cf673/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_11:05:53_53e2fd785b2792e20f13189d30d1d4ef7d9cf673/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-01_11:05:53_53e2fd785b2792e20f13189d30d1d4ef7d9cf673/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-01_11:05:53_53e2fd785b2792e20f13189d30d1d4ef7d9cf673/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 41d4e2de26ed54147b046627768de67843fa9519..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_11:05:53_53e2fd785b2792e20f13189d30d1d4ef7d9cf673/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.72108799999995,0.00382,262.0,0.538,186.0 diff --git a/raw_results/2023-09-01_11:05:53_53e2fd785b2792e20f13189d30d1d4ef7d9cf673/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-01_11:05:53_53e2fd785b2792e20f13189d30d1d4ef7d9cf673/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 815f40e74ccd2aea7d77972f630d18f824bdb6e7..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_11:05:53_53e2fd785b2792e20f13189d30d1d4ef7d9cf673/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-01 12:58:57,232][benchmark][INFO] - Configuring inference benchmark -[2023-09-01 12:58:57,233][benchmark][INFO] - + Setting seed(42) -[2023-09-01 12:58:58,655][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-01 12:58:58,655][backend][INFO] - Configuring pytorch backend -[2023-09-01 12:58:58,655][backend][INFO] - + Checking initial device isolation -[2023-09-01 12:58:58,655][backend][INFO] - + Checking contineous device isolation -[2023-09-01 12:58:58,655][pytorch][INFO] - + Disabling gradients -[2023-09-01 12:58:58,656][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-01 12:58:59,282][pytorch][INFO] - + Turning on eval mode -[2023-09-01 12:58:59,283][inference][INFO] - Running inference benchmark -[2023-09-01 12:58:59,475][inference][INFO] - + Tracking forward pass peak memory -[2023-09-01 12:58:59,523][inference][INFO] - + Forward pass peak memory: 469.72108799999995 (MB) -[2023-09-01 12:58:59,525][inference][INFO] - + Warming up the 
forward pass -[2023-09-01 12:58:59,556][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-01 12:59:04,604][inference][INFO] - + Forward pass latency: 3.82e-03 (s) -[2023-09-01 12:59:04,605][inference][INFO] - + Forward pass throughput: 262.00 (samples/s) -[2023-09-01 12:59:04,606][inference][INFO] - + Warming up the generation pass -[2023-09-01 12:59:05,112][inference][INFO] - + Tracking generation latency and throughput -[2023-09-01 12:59:10,489][inference][INFO] - + Generation pass latency: 5.38e-01 (s) -[2023-09-01 12:59:10,490][inference][INFO] - + Generation pass throughput: 186.00 (tokens/s) -[2023-09-01 12:59:10,490][inference][INFO] - Saving inference results -[2023-09-01 12:59:10,502][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-01_14:09:12_16d6e3087cd35cb08ee24137900340d6924103dd/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-01_14:09:12_16d6e3087cd35cb08ee24137900340d6924103dd/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_14:09:12_16d6e3087cd35cb08ee24137900340d6924103dd/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-01_14:09:12_16d6e3087cd35cb08ee24137900340d6924103dd/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-01_14:09:12_16d6e3087cd35cb08ee24137900340d6924103dd/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 6f6a660be0ad01f6bcf7a87ef8bce5fce2c9522b..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_14:09:12_16d6e3087cd35cb08ee24137900340d6924103dd/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-01_14:09:12_16d6e3087cd35cb08ee24137900340d6924103dd/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-09-01_14:09:12_16d6e3087cd35cb08ee24137900340d6924103dd/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-01_14:09:12_16d6e3087cd35cb08ee24137900340d6924103dd/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_14:09:12_16d6e3087cd35cb08ee24137900340d6924103dd/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-01_14:09:12_16d6e3087cd35cb08ee24137900340d6924103dd/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-01_14:09:12_16d6e3087cd35cb08ee24137900340d6924103dd/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_14:09:12_16d6e3087cd35cb08ee24137900340d6924103dd/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-01_14:09:12_16d6e3087cd35cb08ee24137900340d6924103dd/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-01_14:09:12_16d6e3087cd35cb08ee24137900340d6924103dd/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 1b880fa760f3bbc19ca1ad0f4844b8df27643456..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_14:09:12_16d6e3087cd35cb08ee24137900340d6924103dd/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.36121599999996,0.0066,152.0 diff --git a/raw_results/2023-09-01_14:09:12_16d6e3087cd35cb08ee24137900340d6924103dd/pytorch_bert_inference/0/main.log b/raw_results/2023-09-01_14:09:12_16d6e3087cd35cb08ee24137900340d6924103dd/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
803d10c3526705d74c8727640424bed819b91c05..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_14:09:12_16d6e3087cd35cb08ee24137900340d6924103dd/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-01 14:51:13,978][benchmark][INFO] - Configuring inference benchmark -[2023-09-01 14:51:13,979][benchmark][INFO] - + Setting seed(42) -[2023-09-01 14:51:15,227][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-01 14:51:15,228][backend][INFO] - Configuring pytorch backend -[2023-09-01 14:51:15,228][backend][INFO] - + Checking initial device isolation -[2023-09-01 14:51:15,228][backend][INFO] - + Checking contineous device isolation -[2023-09-01 14:51:15,228][pytorch][INFO] - + Disabling gradients -[2023-09-01 14:51:15,228][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-01 14:51:15,867][pytorch][INFO] - + Turning on eval mode -[2023-09-01 14:51:15,867][inference][INFO] - Running inference benchmark -[2023-09-01 14:51:15,991][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-01 14:51:15,993][inference][INFO] - + Tracking forward pass peak memory -[2023-09-01 14:51:16,060][inference][INFO] - + Forward pass peak memory: 468.36121599999996 (MB) -[2023-09-01 14:51:16,062][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-01 14:51:16,063][inference][INFO] - + Warming up the forward pass -[2023-09-01 14:51:16,145][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-01 14:51:21,173][inference][INFO] - + Forward pass latency: 6.60e-03 (s) -[2023-09-01 14:51:21,174][inference][INFO] - + Forward pass throughput: 152.00 (samples/s) -[2023-09-01 14:51:21,174][inference][INFO] - Saving inference results -[2023-09-01 14:51:21,185][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-01_14:09:12_16d6e3087cd35cb08ee24137900340d6924103dd/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-01_14:09:12_16d6e3087cd35cb08ee24137900340d6924103dd/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_14:09:12_16d6e3087cd35cb08ee24137900340d6924103dd/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-01_14:09:12_16d6e3087cd35cb08ee24137900340d6924103dd/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-01_14:09:12_16d6e3087cd35cb08ee24137900340d6924103dd/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index c42e547a13b3ae97cd736fc7cc3d2b0e433fa4ae..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_14:09:12_16d6e3087cd35cb08ee24137900340d6924103dd/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-01_14:09:12_16d6e3087cd35cb08ee24137900340d6924103dd/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-01_14:09:12_16d6e3087cd35cb08ee24137900340d6924103dd/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-09-01_14:09:12_16d6e3087cd35cb08ee24137900340d6924103dd/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_14:09:12_16d6e3087cd35cb08ee24137900340d6924103dd/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-01_14:09:12_16d6e3087cd35cb08ee24137900340d6924103dd/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-01_14:09:12_16d6e3087cd35cb08ee24137900340d6924103dd/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_14:09:12_16d6e3087cd35cb08ee24137900340d6924103dd/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-01_14:09:12_16d6e3087cd35cb08ee24137900340d6924103dd/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-01_14:09:12_16d6e3087cd35cb08ee24137900340d6924103dd/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 14a3ee04940d9c8bfe88169f6cf402d86f9e01d3..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_14:09:12_16d6e3087cd35cb08ee24137900340d6924103dd/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,469.36064,0.0057,702.0 diff --git a/raw_results/2023-09-01_14:09:12_16d6e3087cd35cb08ee24137900340d6924103dd/pytorch_bert_inference/1/main.log b/raw_results/2023-09-01_14:09:12_16d6e3087cd35cb08ee24137900340d6924103dd/pytorch_bert_inference/1/main.log deleted file mode 100644 index 904374bcbe6fa4ddcc070208cb4f606a97e28aa2..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_14:09:12_16d6e3087cd35cb08ee24137900340d6924103dd/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-01 14:51:21,576][benchmark][INFO] - Configuring inference benchmark -[2023-09-01 14:51:21,577][benchmark][INFO] - + Setting seed(42) -[2023-09-01 14:51:22,014][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-01 14:51:22,015][backend][INFO] - Configuring pytorch backend -[2023-09-01 14:51:22,015][backend][INFO] - + Checking initial device isolation -[2023-09-01 14:51:22,015][backend][INFO] - + Checking contineous device isolation -[2023-09-01 14:51:22,015][pytorch][INFO] - + Disabling gradients -[2023-09-01 14:51:22,015][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-01 14:51:22,130][pytorch][INFO] - + Turning on eval mode -[2023-09-01 14:51:22,131][inference][INFO] - Running inference benchmark -[2023-09-01 14:51:22,255][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-01 14:51:22,257][inference][INFO] - + Tracking forward pass peak 
memory -[2023-09-01 14:51:22,301][inference][INFO] - + Forward pass peak memory: 469.36064 (MB) -[2023-09-01 14:51:22,302][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-01 14:51:22,303][inference][INFO] - + Warming up the forward pass -[2023-09-01 14:51:22,361][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-01 14:51:27,392][inference][INFO] - + Forward pass latency: 5.70e-03 (s) -[2023-09-01 14:51:27,393][inference][INFO] - + Forward pass throughput: 702.00 (samples/s) -[2023-09-01 14:51:27,393][inference][INFO] - Saving inference results -[2023-09-01 14:51:27,401][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-01_14:09:12_16d6e3087cd35cb08ee24137900340d6924103dd/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-01_14:09:12_16d6e3087cd35cb08ee24137900340d6924103dd/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_14:09:12_16d6e3087cd35cb08ee24137900340d6924103dd/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-01_14:09:12_16d6e3087cd35cb08ee24137900340d6924103dd/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-01_14:09:12_16d6e3087cd35cb08ee24137900340d6924103dd/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index aeb482924f725b496d729e7c1863f099871ff84c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_14:09:12_16d6e3087cd35cb08ee24137900340d6924103dd/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-01_14:09:12_16d6e3087cd35cb08ee24137900340d6924103dd/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-01_14:09:12_16d6e3087cd35cb08ee24137900340d6924103dd/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-09-01_14:09:12_16d6e3087cd35cb08ee24137900340d6924103dd/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_14:09:12_16d6e3087cd35cb08ee24137900340d6924103dd/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-01_14:09:12_16d6e3087cd35cb08ee24137900340d6924103dd/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-01_14:09:12_16d6e3087cd35cb08ee24137900340d6924103dd/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_14:09:12_16d6e3087cd35cb08ee24137900340d6924103dd/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-01_14:09:12_16d6e3087cd35cb08ee24137900340d6924103dd/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-01_14:09:12_16d6e3087cd35cb08ee24137900340d6924103dd/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index f2d48500487559e58aa2fffe7f4732c74dbf2721..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_14:09:12_16d6e3087cd35cb08ee24137900340d6924103dd/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.54496,0.00403,248.0,0.519,193.0 diff --git a/raw_results/2023-09-01_14:09:12_16d6e3087cd35cb08ee24137900340d6924103dd/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-01_14:09:12_16d6e3087cd35cb08ee24137900340d6924103dd/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 90a5af17694b3ee697fde699341a93c92218a273..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-09-01_14:09:12_16d6e3087cd35cb08ee24137900340d6924103dd/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-01 14:51:32,753][benchmark][INFO] - Configuring inference benchmark -[2023-09-01 14:51:32,754][benchmark][INFO] - + Setting seed(42) -[2023-09-01 14:51:34,150][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-01 14:51:34,151][backend][INFO] - Configuring pytorch backend -[2023-09-01 14:51:34,151][backend][INFO] - + Checking initial device isolation -[2023-09-01 14:51:34,151][backend][INFO] - + Checking contineous device isolation -[2023-09-01 14:51:34,151][pytorch][INFO] - + Disabling gradients -[2023-09-01 14:51:34,151][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-01 14:51:34,844][pytorch][INFO] - + Turning on eval mode -[2023-09-01 14:51:34,845][inference][INFO] - Running inference benchmark -[2023-09-01 14:51:35,043][inference][INFO] - + Tracking forward pass peak memory -[2023-09-01 14:51:35,090][inference][INFO] - + Forward pass peak memory: 469.54496 (MB) -[2023-09-01 14:51:35,091][inference][INFO] - + Warming up the forward pass -[2023-09-01 14:51:35,138][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-01 14:51:40,185][inference][INFO] - + Forward pass latency: 4.03e-03 (s) -[2023-09-01 14:51:40,187][inference][INFO] - + Forward pass throughput: 248.00 (samples/s) -[2023-09-01 14:51:40,187][inference][INFO] - + Warming up the generation pass -[2023-09-01 14:51:40,696][inference][INFO] - + Tracking generation latency and throughput -[2023-09-01 14:51:45,885][inference][INFO] - + Generation pass latency: 5.19e-01 (s) -[2023-09-01 14:51:45,886][inference][INFO] - + Generation pass throughput: 193.00 (tokens/s) -[2023-09-01 14:51:45,886][inference][INFO] - Saving inference results -[2023-09-01 14:51:45,897][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-01_14:26:42_69c5b8f1861bf449339cbcdcd0d0e4a98e9a4c6b/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-01_14:26:42_69c5b8f1861bf449339cbcdcd0d0e4a98e9a4c6b/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_14:26:42_69c5b8f1861bf449339cbcdcd0d0e4a98e9a4c6b/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 
16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-01_14:26:42_69c5b8f1861bf449339cbcdcd0d0e4a98e9a4c6b/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-01_14:26:42_69c5b8f1861bf449339cbcdcd0d0e4a98e9a4c6b/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 97218b5b987d196d311f7cc3d5cc4a8d3002e2bd..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_14:26:42_69c5b8f1861bf449339cbcdcd0d0e4a98e9a4c6b/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-01_14:26:42_69c5b8f1861bf449339cbcdcd0d0e4a98e9a4c6b/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-01_14:26:42_69c5b8f1861bf449339cbcdcd0d0e4a98e9a4c6b/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-01_14:26:42_69c5b8f1861bf449339cbcdcd0d0e4a98e9a4c6b/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_14:26:42_69c5b8f1861bf449339cbcdcd0d0e4a98e9a4c6b/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-01_14:26:42_69c5b8f1861bf449339cbcdcd0d0e4a98e9a4c6b/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-01_14:26:42_69c5b8f1861bf449339cbcdcd0d0e4a98e9a4c6b/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_14:26:42_69c5b8f1861bf449339cbcdcd0d0e4a98e9a4c6b/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-01_14:26:42_69c5b8f1861bf449339cbcdcd0d0e4a98e9a4c6b/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-01_14:26:42_69c5b8f1861bf449339cbcdcd0d0e4a98e9a4c6b/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 54beb1e7fa050537281b478f5c5931dfc5dc94fd..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_14:26:42_69c5b8f1861bf449339cbcdcd0d0e4a98e9a4c6b/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.840448,0.00775,129.0 diff --git a/raw_results/2023-09-01_14:26:42_69c5b8f1861bf449339cbcdcd0d0e4a98e9a4c6b/pytorch_bert_inference/0/main.log b/raw_results/2023-09-01_14:26:42_69c5b8f1861bf449339cbcdcd0d0e4a98e9a4c6b/pytorch_bert_inference/0/main.log deleted file mode 100644 index 0891805904891f77dc43cf7fbe7c4c34d14b7f53..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_14:26:42_69c5b8f1861bf449339cbcdcd0d0e4a98e9a4c6b/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-01 14:52:59,687][benchmark][INFO] - Configuring inference benchmark -[2023-09-01 14:52:59,688][benchmark][INFO] - + Setting seed(42) -[2023-09-01 14:53:00,892][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-01 14:53:00,892][backend][INFO] - Configuring pytorch backend -[2023-09-01 14:53:00,892][backend][INFO] - + Checking initial device isolation -[2023-09-01 14:53:00,892][backend][INFO] - + Checking contineous device isolation -[2023-09-01 14:53:00,892][pytorch][INFO] - + Disabling gradients -[2023-09-01 14:53:00,893][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-01 14:53:01,524][pytorch][INFO] - + Turning on eval mode -[2023-09-01 14:53:01,525][inference][INFO] - Running inference benchmark -[2023-09-01 14:53:01,648][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-01 14:53:01,649][inference][INFO] - + Tracking forward pass peak 
memory -[2023-09-01 14:53:01,719][inference][INFO] - + Forward pass peak memory: 468.840448 (MB) -[2023-09-01 14:53:01,720][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-01 14:53:01,722][inference][INFO] - + Warming up the forward pass -[2023-09-01 14:53:01,823][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-01 14:53:06,844][inference][INFO] - + Forward pass latency: 7.75e-03 (s) -[2023-09-01 14:53:06,845][inference][INFO] - + Forward pass throughput: 129.00 (samples/s) -[2023-09-01 14:53:06,846][inference][INFO] - Saving inference results -[2023-09-01 14:53:06,855][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-01_14:26:42_69c5b8f1861bf449339cbcdcd0d0e4a98e9a4c6b/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-01_14:26:42_69c5b8f1861bf449339cbcdcd0d0e4a98e9a4c6b/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_14:26:42_69c5b8f1861bf449339cbcdcd0d0e4a98e9a4c6b/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-01_14:26:42_69c5b8f1861bf449339cbcdcd0d0e4a98e9a4c6b/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-01_14:26:42_69c5b8f1861bf449339cbcdcd0d0e4a98e9a4c6b/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index e081323948efd57fc927278357a7e4d7c205f401..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_14:26:42_69c5b8f1861bf449339cbcdcd0d0e4a98e9a4c6b/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-01_14:26:42_69c5b8f1861bf449339cbcdcd0d0e4a98e9a4c6b/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-01_14:26:42_69c5b8f1861bf449339cbcdcd0d0e4a98e9a4c6b/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-09-01_14:26:42_69c5b8f1861bf449339cbcdcd0d0e4a98e9a4c6b/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_14:26:42_69c5b8f1861bf449339cbcdcd0d0e4a98e9a4c6b/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-01_14:26:42_69c5b8f1861bf449339cbcdcd0d0e4a98e9a4c6b/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-01_14:26:42_69c5b8f1861bf449339cbcdcd0d0e4a98e9a4c6b/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_14:26:42_69c5b8f1861bf449339cbcdcd0d0e4a98e9a4c6b/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-01_14:26:42_69c5b8f1861bf449339cbcdcd0d0e4a98e9a4c6b/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-01_14:26:42_69c5b8f1861bf449339cbcdcd0d0e4a98e9a4c6b/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index f47e426418862a5a4a3b3879f47468bcfeb4fac8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_14:26:42_69c5b8f1861bf449339cbcdcd0d0e4a98e9a4c6b/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,469.83168,0.00537,745.0 diff --git a/raw_results/2023-09-01_14:26:42_69c5b8f1861bf449339cbcdcd0d0e4a98e9a4c6b/pytorch_bert_inference/1/main.log b/raw_results/2023-09-01_14:26:42_69c5b8f1861bf449339cbcdcd0d0e4a98e9a4c6b/pytorch_bert_inference/1/main.log deleted file mode 100644 index 186bbdd34d5ab3037bdb3d59c027de52e0451f04..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-09-01_14:26:42_69c5b8f1861bf449339cbcdcd0d0e4a98e9a4c6b/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-01 14:53:07,278][benchmark][INFO] - Configuring inference benchmark -[2023-09-01 14:53:07,279][benchmark][INFO] - + Setting seed(42) -[2023-09-01 14:53:07,745][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-01 14:53:07,745][backend][INFO] - Configuring pytorch backend -[2023-09-01 14:53:07,745][backend][INFO] - + Checking initial device isolation -[2023-09-01 14:53:07,746][backend][INFO] - + Checking contineous device isolation -[2023-09-01 14:53:07,746][pytorch][INFO] - + Disabling gradients -[2023-09-01 14:53:07,746][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-01 14:53:07,859][pytorch][INFO] - + Turning on eval mode -[2023-09-01 14:53:07,860][inference][INFO] - Running inference benchmark -[2023-09-01 14:53:07,986][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-01 14:53:07,988][inference][INFO] - + Tracking forward pass peak memory -[2023-09-01 14:53:08,034][inference][INFO] - + Forward pass peak memory: 469.83168 (MB) -[2023-09-01 14:53:08,035][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-01 14:53:08,038][inference][INFO] - + Warming up the forward pass -[2023-09-01 14:53:08,103][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-01 14:53:13,131][inference][INFO] - + Forward pass latency: 5.37e-03 (s) -[2023-09-01 14:53:13,132][inference][INFO] - + Forward pass throughput: 745.00 (samples/s) -[2023-09-01 14:53:13,132][inference][INFO] - Saving inference results -[2023-09-01 14:53:13,138][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-01_14:26:42_69c5b8f1861bf449339cbcdcd0d0e4a98e9a4c6b/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-01_14:26:42_69c5b8f1861bf449339cbcdcd0d0e4a98e9a4c6b/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_14:26:42_69c5b8f1861bf449339cbcdcd0d0e4a98e9a4c6b/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: 
hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-01_14:26:42_69c5b8f1861bf449339cbcdcd0d0e4a98e9a4c6b/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-01_14:26:42_69c5b8f1861bf449339cbcdcd0d0e4a98e9a4c6b/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index e24a50aa4e7037bbb0add64d9c6a1e43524bd686..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_14:26:42_69c5b8f1861bf449339cbcdcd0d0e4a98e9a4c6b/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-01_14:26:42_69c5b8f1861bf449339cbcdcd0d0e4a98e9a4c6b/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-01_14:26:42_69c5b8f1861bf449339cbcdcd0d0e4a98e9a4c6b/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-09-01_14:26:42_69c5b8f1861bf449339cbcdcd0d0e4a98e9a4c6b/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_14:26:42_69c5b8f1861bf449339cbcdcd0d0e4a98e9a4c6b/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-01_14:26:42_69c5b8f1861bf449339cbcdcd0d0e4a98e9a4c6b/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-01_14:26:42_69c5b8f1861bf449339cbcdcd0d0e4a98e9a4c6b/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_14:26:42_69c5b8f1861bf449339cbcdcd0d0e4a98e9a4c6b/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-01_14:26:42_69c5b8f1861bf449339cbcdcd0d0e4a98e9a4c6b/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-01_14:26:42_69c5b8f1861bf449339cbcdcd0d0e4a98e9a4c6b/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index dc6b10e4fc93820f51b614d51156ff414ae33920..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_14:26:42_69c5b8f1861bf449339cbcdcd0d0e4a98e9a4c6b/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.286912,0.00472,212.0,0.515,194.0 diff --git a/raw_results/2023-09-01_14:26:42_69c5b8f1861bf449339cbcdcd0d0e4a98e9a4c6b/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-01_14:26:42_69c5b8f1861bf449339cbcdcd0d0e4a98e9a4c6b/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 263bee978dffab7645db47ff2855a9dde71f07ad..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_14:26:42_69c5b8f1861bf449339cbcdcd0d0e4a98e9a4c6b/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-01 14:53:18,359][benchmark][INFO] - Configuring inference benchmark -[2023-09-01 14:53:18,360][benchmark][INFO] - + Setting seed(42) -[2023-09-01 14:53:19,765][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-01 14:53:19,766][backend][INFO] - Configuring pytorch backend -[2023-09-01 14:53:19,766][backend][INFO] - + Checking initial device isolation -[2023-09-01 14:53:19,766][backend][INFO] - + Checking contineous device isolation -[2023-09-01 14:53:19,766][pytorch][INFO] - + Disabling gradients -[2023-09-01 14:53:19,766][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-01 14:53:20,420][pytorch][INFO] - + Turning on eval mode -[2023-09-01 14:53:20,421][inference][INFO] - Running inference benchmark -[2023-09-01 14:53:20,618][inference][INFO] - + Tracking forward pass peak memory -[2023-09-01 14:53:20,662][inference][INFO] - + Forward pass peak memory: 469.286912 (MB) -[2023-09-01 14:53:20,664][inference][INFO] - + Warming up the forward pass 
-[2023-09-01 14:53:20,705][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-01 14:53:25,748][inference][INFO] - + Forward pass latency: 4.72e-03 (s) -[2023-09-01 14:53:25,749][inference][INFO] - + Forward pass throughput: 212.00 (samples/s) -[2023-09-01 14:53:25,750][inference][INFO] - + Warming up the generation pass -[2023-09-01 14:53:26,299][inference][INFO] - + Tracking generation latency and throughput -[2023-09-01 14:53:31,450][inference][INFO] - + Generation pass latency: 5.15e-01 (s) -[2023-09-01 14:53:31,451][inference][INFO] - + Generation pass throughput: 194.00 (tokens/s) -[2023-09-01 14:53:31,451][inference][INFO] - Saving inference results -[2023-09-01 14:53:31,462][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-01_15:24:12_be0e189bd3f2b5b960a4062361ead32c055a362e/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-01_15:24:12_be0e189bd3f2b5b960a4062361ead32c055a362e/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_15:24:12_be0e189bd3f2b5b960a4062361ead32c055a362e/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-01_15:24:12_be0e189bd3f2b5b960a4062361ead32c055a362e/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-01_15:24:12_be0e189bd3f2b5b960a4062361ead32c055a362e/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 7933150c7e648903059f902e7b930d5990345061..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_15:24:12_be0e189bd3f2b5b960a4062361ead32c055a362e/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-01_15:24:12_be0e189bd3f2b5b960a4062361ead32c055a362e/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-09-01_15:24:12_be0e189bd3f2b5b960a4062361ead32c055a362e/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-01_15:24:12_be0e189bd3f2b5b960a4062361ead32c055a362e/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_15:24:12_be0e189bd3f2b5b960a4062361ead32c055a362e/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-01_15:24:12_be0e189bd3f2b5b960a4062361ead32c055a362e/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-01_15:24:12_be0e189bd3f2b5b960a4062361ead32c055a362e/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_15:24:12_be0e189bd3f2b5b960a4062361ead32c055a362e/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-01_15:24:12_be0e189bd3f2b5b960a4062361ead32c055a362e/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-01_15:24:12_be0e189bd3f2b5b960a4062361ead32c055a362e/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 174399ebbb53be33c7448c88c0cccc4ce4c435db..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_15:24:12_be0e189bd3f2b5b960a4062361ead32c055a362e/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,469.143552,0.00618,162.0 diff --git a/raw_results/2023-09-01_15:24:12_be0e189bd3f2b5b960a4062361ead32c055a362e/pytorch_bert_inference/0/main.log b/raw_results/2023-09-01_15:24:12_be0e189bd3f2b5b960a4062361ead32c055a362e/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
21337488195ba1bf8e605670471591031d346dc0..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_15:24:12_be0e189bd3f2b5b960a4062361ead32c055a362e/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-01 16:50:03,563][benchmark][INFO] - Configuring inference benchmark -[2023-09-01 16:50:03,563][benchmark][INFO] - + Setting seed(42) -[2023-09-01 16:50:04,788][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-01 16:50:04,789][backend][INFO] - Configuring pytorch backend -[2023-09-01 16:50:04,789][backend][INFO] - + Checking initial device isolation -[2023-09-01 16:50:04,789][backend][INFO] - + Checking contineous device isolation -[2023-09-01 16:50:04,789][pytorch][INFO] - + Disabling gradients -[2023-09-01 16:50:04,790][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-01 16:50:05,426][pytorch][INFO] - + Turning on eval mode -[2023-09-01 16:50:05,427][inference][INFO] - Running inference benchmark -[2023-09-01 16:50:05,554][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-01 16:50:05,556][inference][INFO] - + Tracking forward pass peak memory -[2023-09-01 16:50:05,635][inference][INFO] - + Forward pass peak memory: 469.143552 (MB) -[2023-09-01 16:50:05,636][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-01 16:50:05,638][inference][INFO] - + Warming up the forward pass -[2023-09-01 16:50:05,768][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-01 16:50:10,795][inference][INFO] - + Forward pass latency: 6.18e-03 (s) -[2023-09-01 16:50:10,796][inference][INFO] - + Forward pass throughput: 162.00 (samples/s) -[2023-09-01 16:50:10,797][inference][INFO] - Saving inference results -[2023-09-01 16:50:10,807][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-01_15:24:12_be0e189bd3f2b5b960a4062361ead32c055a362e/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-01_15:24:12_be0e189bd3f2b5b960a4062361ead32c055a362e/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_15:24:12_be0e189bd3f2b5b960a4062361ead32c055a362e/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-01_15:24:12_be0e189bd3f2b5b960a4062361ead32c055a362e/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-01_15:24:12_be0e189bd3f2b5b960a4062361ead32c055a362e/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index d146c8778f3902b02d5b45f503bdfb8fd00d789b..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_15:24:12_be0e189bd3f2b5b960a4062361ead32c055a362e/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-01_15:24:12_be0e189bd3f2b5b960a4062361ead32c055a362e/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-01_15:24:12_be0e189bd3f2b5b960a4062361ead32c055a362e/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-09-01_15:24:12_be0e189bd3f2b5b960a4062361ead32c055a362e/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_15:24:12_be0e189bd3f2b5b960a4062361ead32c055a362e/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-01_15:24:12_be0e189bd3f2b5b960a4062361ead32c055a362e/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-01_15:24:12_be0e189bd3f2b5b960a4062361ead32c055a362e/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_15:24:12_be0e189bd3f2b5b960a4062361ead32c055a362e/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-01_15:24:12_be0e189bd3f2b5b960a4062361ead32c055a362e/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-01_15:24:12_be0e189bd3f2b5b960a4062361ead32c055a362e/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 7993db8909f84e226574dcbabbe13158029a5097..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_15:24:12_be0e189bd3f2b5b960a4062361ead32c055a362e/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,470.15936,0.00532,752.0 diff --git a/raw_results/2023-09-01_15:24:12_be0e189bd3f2b5b960a4062361ead32c055a362e/pytorch_bert_inference/1/main.log b/raw_results/2023-09-01_15:24:12_be0e189bd3f2b5b960a4062361ead32c055a362e/pytorch_bert_inference/1/main.log deleted file mode 100644 index 069e010ed4ca7e0193a07a6bae8e4ebd596eeac8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_15:24:12_be0e189bd3f2b5b960a4062361ead32c055a362e/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-01 16:50:11,205][benchmark][INFO] - Configuring inference benchmark -[2023-09-01 16:50:11,205][benchmark][INFO] - + Setting seed(42) -[2023-09-01 16:50:11,633][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-01 16:50:11,633][backend][INFO] - Configuring pytorch backend -[2023-09-01 16:50:11,633][backend][INFO] - + Checking initial device isolation -[2023-09-01 16:50:11,634][backend][INFO] - + Checking contineous device isolation -[2023-09-01 16:50:11,634][pytorch][INFO] - + Disabling gradients -[2023-09-01 16:50:11,634][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-01 16:50:11,749][pytorch][INFO] - + Turning on eval mode -[2023-09-01 16:50:11,750][inference][INFO] - Running inference benchmark -[2023-09-01 16:50:11,876][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-01 16:50:11,877][inference][INFO] - + Tracking forward pass peak 
memory -[2023-09-01 16:50:11,919][inference][INFO] - + Forward pass peak memory: 470.15936 (MB) -[2023-09-01 16:50:11,920][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-01 16:50:11,922][inference][INFO] - + Warming up the forward pass -[2023-09-01 16:50:11,976][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-01 16:50:17,004][inference][INFO] - + Forward pass latency: 5.32e-03 (s) -[2023-09-01 16:50:17,005][inference][INFO] - + Forward pass throughput: 752.00 (samples/s) -[2023-09-01 16:50:17,005][inference][INFO] - Saving inference results -[2023-09-01 16:50:17,011][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-01_15:24:12_be0e189bd3f2b5b960a4062361ead32c055a362e/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-01_15:24:12_be0e189bd3f2b5b960a4062361ead32c055a362e/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_15:24:12_be0e189bd3f2b5b960a4062361ead32c055a362e/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-01_15:24:12_be0e189bd3f2b5b960a4062361ead32c055a362e/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-01_15:24:12_be0e189bd3f2b5b960a4062361ead32c055a362e/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index ec3d34d11878f77d46033f6370f94732b2e7044b..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_15:24:12_be0e189bd3f2b5b960a4062361ead32c055a362e/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-01_15:24:12_be0e189bd3f2b5b960a4062361ead32c055a362e/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-01_15:24:12_be0e189bd3f2b5b960a4062361ead32c055a362e/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-09-01_15:24:12_be0e189bd3f2b5b960a4062361ead32c055a362e/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_15:24:12_be0e189bd3f2b5b960a4062361ead32c055a362e/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-01_15:24:12_be0e189bd3f2b5b960a4062361ead32c055a362e/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-01_15:24:12_be0e189bd3f2b5b960a4062361ead32c055a362e/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_15:24:12_be0e189bd3f2b5b960a4062361ead32c055a362e/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-01_15:24:12_be0e189bd3f2b5b960a4062361ead32c055a362e/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-01_15:24:12_be0e189bd3f2b5b960a4062361ead32c055a362e/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 4cef43b79b0e55e9fa589c4dcf8f460c07b09c0c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_15:24:12_be0e189bd3f2b5b960a4062361ead32c055a362e/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.262336,0.00355,282.0,0.503,199.0 diff --git a/raw_results/2023-09-01_15:24:12_be0e189bd3f2b5b960a4062361ead32c055a362e/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-01_15:24:12_be0e189bd3f2b5b960a4062361ead32c055a362e/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 17f2409d0dbc465ecb7930a08dbde3fe7faa1c13..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-09-01_15:24:12_be0e189bd3f2b5b960a4062361ead32c055a362e/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-01 16:50:22,326][benchmark][INFO] - Configuring inference benchmark -[2023-09-01 16:50:22,327][benchmark][INFO] - + Setting seed(42) -[2023-09-01 16:50:23,796][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-01 16:50:23,796][backend][INFO] - Configuring pytorch backend -[2023-09-01 16:50:23,797][backend][INFO] - + Checking initial device isolation -[2023-09-01 16:50:23,797][backend][INFO] - + Checking contineous device isolation -[2023-09-01 16:50:23,797][pytorch][INFO] - + Disabling gradients -[2023-09-01 16:50:23,797][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-01 16:50:24,454][pytorch][INFO] - + Turning on eval mode -[2023-09-01 16:50:24,455][inference][INFO] - Running inference benchmark -[2023-09-01 16:50:24,654][inference][INFO] - + Tracking forward pass peak memory -[2023-09-01 16:50:24,701][inference][INFO] - + Forward pass peak memory: 469.262336 (MB) -[2023-09-01 16:50:24,702][inference][INFO] - + Warming up the forward pass -[2023-09-01 16:50:24,751][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-01 16:50:29,802][inference][INFO] - + Forward pass latency: 3.55e-03 (s) -[2023-09-01 16:50:29,804][inference][INFO] - + Forward pass throughput: 282.00 (samples/s) -[2023-09-01 16:50:29,805][inference][INFO] - + Warming up the generation pass -[2023-09-01 16:50:30,317][inference][INFO] - + Tracking generation latency and throughput -[2023-09-01 16:50:35,351][inference][INFO] - + Generation pass latency: 5.03e-01 (s) -[2023-09-01 16:50:35,352][inference][INFO] - + Generation pass throughput: 199.00 (tokens/s) -[2023-09-01 16:50:35,352][inference][INFO] - Saving inference results -[2023-09-01 16:50:35,365][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-01_15:39:00_b439129e74bb207138e49ffb1f147bd94aa58574/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-01_15:39:00_b439129e74bb207138e49ffb1f147bd94aa58574/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_15:39:00_b439129e74bb207138e49ffb1f147bd94aa58574/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-01_15:39:00_b439129e74bb207138e49ffb1f147bd94aa58574/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-01_15:39:00_b439129e74bb207138e49ffb1f147bd94aa58574/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index c8da00bff633bfa9f96d6682c61e53888776349e..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_15:39:00_b439129e74bb207138e49ffb1f147bd94aa58574/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-01_15:39:00_b439129e74bb207138e49ffb1f147bd94aa58574/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-01_15:39:00_b439129e74bb207138e49ffb1f147bd94aa58574/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-01_15:39:00_b439129e74bb207138e49ffb1f147bd94aa58574/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_15:39:00_b439129e74bb207138e49ffb1f147bd94aa58574/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-01_15:39:00_b439129e74bb207138e49ffb1f147bd94aa58574/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-01_15:39:00_b439129e74bb207138e49ffb1f147bd94aa58574/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_15:39:00_b439129e74bb207138e49ffb1f147bd94aa58574/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-01_15:39:00_b439129e74bb207138e49ffb1f147bd94aa58574/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-01_15:39:00_b439129e74bb207138e49ffb1f147bd94aa58574/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index e8f64e6e4d305bd7bb2c3dadcaf889051c002e2a..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_15:39:00_b439129e74bb207138e49ffb1f147bd94aa58574/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.922368,0.00654,153.0 diff --git a/raw_results/2023-09-01_15:39:00_b439129e74bb207138e49ffb1f147bd94aa58574/pytorch_bert_inference/0/main.log b/raw_results/2023-09-01_15:39:00_b439129e74bb207138e49ffb1f147bd94aa58574/pytorch_bert_inference/0/main.log deleted file mode 100644 index 5123a018014eb188afd1aacf271fb4597e314fd2..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_15:39:00_b439129e74bb207138e49ffb1f147bd94aa58574/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-01 16:51:47,743][benchmark][INFO] - Configuring inference benchmark -[2023-09-01 16:51:47,744][benchmark][INFO] - + Setting seed(42) -[2023-09-01 16:51:49,122][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-01 16:51:49,122][backend][INFO] - Configuring pytorch backend -[2023-09-01 16:51:49,122][backend][INFO] - + Checking initial device isolation -[2023-09-01 16:51:49,122][backend][INFO] - + Checking contineous device isolation -[2023-09-01 16:51:49,123][pytorch][INFO] - + Disabling gradients -[2023-09-01 16:51:49,123][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-01 16:51:49,722][pytorch][INFO] - + Turning on eval mode -[2023-09-01 16:51:49,722][inference][INFO] - Running inference benchmark -[2023-09-01 16:51:49,981][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-01 16:51:49,983][inference][INFO] - + Tracking forward pass peak 
memory -[2023-09-01 16:51:50,044][inference][INFO] - + Forward pass peak memory: 468.922368 (MB) -[2023-09-01 16:51:50,045][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-01 16:51:50,047][inference][INFO] - + Warming up the forward pass -[2023-09-01 16:51:50,130][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-01 16:51:55,154][inference][INFO] - + Forward pass latency: 6.54e-03 (s) -[2023-09-01 16:51:55,154][inference][INFO] - + Forward pass throughput: 153.00 (samples/s) -[2023-09-01 16:51:55,155][inference][INFO] - Saving inference results -[2023-09-01 16:51:55,162][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-01_15:39:00_b439129e74bb207138e49ffb1f147bd94aa58574/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-01_15:39:00_b439129e74bb207138e49ffb1f147bd94aa58574/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_15:39:00_b439129e74bb207138e49ffb1f147bd94aa58574/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-01_15:39:00_b439129e74bb207138e49ffb1f147bd94aa58574/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-01_15:39:00_b439129e74bb207138e49ffb1f147bd94aa58574/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index a5fc62aec6b8c0a4e049829ce28a998648a6fe2c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_15:39:00_b439129e74bb207138e49ffb1f147bd94aa58574/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-01_15:39:00_b439129e74bb207138e49ffb1f147bd94aa58574/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-01_15:39:00_b439129e74bb207138e49ffb1f147bd94aa58574/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-09-01_15:39:00_b439129e74bb207138e49ffb1f147bd94aa58574/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_15:39:00_b439129e74bb207138e49ffb1f147bd94aa58574/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-01_15:39:00_b439129e74bb207138e49ffb1f147bd94aa58574/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-01_15:39:00_b439129e74bb207138e49ffb1f147bd94aa58574/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_15:39:00_b439129e74bb207138e49ffb1f147bd94aa58574/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-01_15:39:00_b439129e74bb207138e49ffb1f147bd94aa58574/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-01_15:39:00_b439129e74bb207138e49ffb1f147bd94aa58574/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index bfdd41f58db9174841f2f7715c3d73e2a4d2e157..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_15:39:00_b439129e74bb207138e49ffb1f147bd94aa58574/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,469.929984,0.00503,795.0 diff --git a/raw_results/2023-09-01_15:39:00_b439129e74bb207138e49ffb1f147bd94aa58574/pytorch_bert_inference/1/main.log b/raw_results/2023-09-01_15:39:00_b439129e74bb207138e49ffb1f147bd94aa58574/pytorch_bert_inference/1/main.log deleted file mode 100644 index c3d5bee7f9762886c62831314d0c6644ee467395..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-09-01_15:39:00_b439129e74bb207138e49ffb1f147bd94aa58574/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-01 16:51:55,550][benchmark][INFO] - Configuring inference benchmark -[2023-09-01 16:51:55,551][benchmark][INFO] - + Setting seed(42) -[2023-09-01 16:51:55,987][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-01 16:51:55,988][backend][INFO] - Configuring pytorch backend -[2023-09-01 16:51:55,988][backend][INFO] - + Checking initial device isolation -[2023-09-01 16:51:55,988][backend][INFO] - + Checking contineous device isolation -[2023-09-01 16:51:55,988][pytorch][INFO] - + Disabling gradients -[2023-09-01 16:51:55,988][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-01 16:51:56,134][pytorch][INFO] - + Turning on eval mode -[2023-09-01 16:51:56,135][inference][INFO] - Running inference benchmark -[2023-09-01 16:51:56,252][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-01 16:51:56,254][inference][INFO] - + Tracking forward pass peak memory -[2023-09-01 16:51:56,293][inference][INFO] - + Forward pass peak memory: 469.929984 (MB) -[2023-09-01 16:51:56,294][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-01 16:51:56,295][inference][INFO] - + Warming up the forward pass -[2023-09-01 16:51:56,345][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-01 16:52:01,376][inference][INFO] - + Forward pass latency: 5.03e-03 (s) -[2023-09-01 16:52:01,377][inference][INFO] - + Forward pass throughput: 795.00 (samples/s) -[2023-09-01 16:52:01,377][inference][INFO] - Saving inference results -[2023-09-01 16:52:01,383][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-01_15:39:00_b439129e74bb207138e49ffb1f147bd94aa58574/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-01_15:39:00_b439129e74bb207138e49ffb1f147bd94aa58574/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_15:39:00_b439129e74bb207138e49ffb1f147bd94aa58574/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: 
hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-01_15:39:00_b439129e74bb207138e49ffb1f147bd94aa58574/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-01_15:39:00_b439129e74bb207138e49ffb1f147bd94aa58574/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 6f32a43bc70f2e09711c6a1e02db3a1dab441b88..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_15:39:00_b439129e74bb207138e49ffb1f147bd94aa58574/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-01_15:39:00_b439129e74bb207138e49ffb1f147bd94aa58574/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-01_15:39:00_b439129e74bb207138e49ffb1f147bd94aa58574/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-09-01_15:39:00_b439129e74bb207138e49ffb1f147bd94aa58574/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_15:39:00_b439129e74bb207138e49ffb1f147bd94aa58574/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-01_15:39:00_b439129e74bb207138e49ffb1f147bd94aa58574/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-01_15:39:00_b439129e74bb207138e49ffb1f147bd94aa58574/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_15:39:00_b439129e74bb207138e49ffb1f147bd94aa58574/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-01_15:39:00_b439129e74bb207138e49ffb1f147bd94aa58574/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-01_15:39:00_b439129e74bb207138e49ffb1f147bd94aa58574/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index f299425a311ce0e52304da98226a67174b632b0c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_15:39:00_b439129e74bb207138e49ffb1f147bd94aa58574/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.536768,0.00451,222.0,0.529,189.0 diff --git a/raw_results/2023-09-01_15:39:00_b439129e74bb207138e49ffb1f147bd94aa58574/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-01_15:39:00_b439129e74bb207138e49ffb1f147bd94aa58574/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 6019fdd203e3e38d3d778de0c5cfee9f30a6f80c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_15:39:00_b439129e74bb207138e49ffb1f147bd94aa58574/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-01 16:52:06,559][benchmark][INFO] - Configuring inference benchmark -[2023-09-01 16:52:06,559][benchmark][INFO] - + Setting seed(42) -[2023-09-01 16:52:08,103][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-01 16:52:08,104][backend][INFO] - Configuring pytorch backend -[2023-09-01 16:52:08,104][backend][INFO] - + Checking initial device isolation -[2023-09-01 16:52:08,104][backend][INFO] - + Checking contineous device isolation -[2023-09-01 16:52:08,104][pytorch][INFO] - + Disabling gradients -[2023-09-01 16:52:08,104][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-01 16:52:08,890][pytorch][INFO] - + Turning on eval mode -[2023-09-01 16:52:08,891][inference][INFO] - Running inference benchmark -[2023-09-01 16:52:09,098][inference][INFO] - + Tracking forward pass peak memory -[2023-09-01 16:52:09,146][inference][INFO] - + Forward pass peak memory: 469.536768 (MB) -[2023-09-01 16:52:09,147][inference][INFO] - + Warming up the forward pass 
-[2023-09-01 16:52:09,189][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-01 16:52:14,231][inference][INFO] - + Forward pass latency: 4.51e-03 (s) -[2023-09-01 16:52:14,232][inference][INFO] - + Forward pass throughput: 222.00 (samples/s) -[2023-09-01 16:52:14,233][inference][INFO] - + Warming up the generation pass -[2023-09-01 16:52:14,826][inference][INFO] - + Tracking generation latency and throughput -[2023-09-01 16:52:20,118][inference][INFO] - + Generation pass latency: 5.29e-01 (s) -[2023-09-01 16:52:20,119][inference][INFO] - + Generation pass throughput: 189.00 (tokens/s) -[2023-09-01 16:52:20,119][inference][INFO] - Saving inference results -[2023-09-01 16:52:20,129][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-01_15:50:59_1fa2d89a9bb98a15e9720190e07d272a42f03d28/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-01_15:50:59_1fa2d89a9bb98a15e9720190e07d272a42f03d28/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_15:50:59_1fa2d89a9bb98a15e9720190e07d272a42f03d28/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-01_15:50:59_1fa2d89a9bb98a15e9720190e07d272a42f03d28/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-01_15:50:59_1fa2d89a9bb98a15e9720190e07d272a42f03d28/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 86057caf328d51c22644d93d5b471ed839f3aa2e..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_15:50:59_1fa2d89a9bb98a15e9720190e07d272a42f03d28/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-01_15:50:59_1fa2d89a9bb98a15e9720190e07d272a42f03d28/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-09-01_15:50:59_1fa2d89a9bb98a15e9720190e07d272a42f03d28/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-01_15:50:59_1fa2d89a9bb98a15e9720190e07d272a42f03d28/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_15:50:59_1fa2d89a9bb98a15e9720190e07d272a42f03d28/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-01_15:50:59_1fa2d89a9bb98a15e9720190e07d272a42f03d28/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-01_15:50:59_1fa2d89a9bb98a15e9720190e07d272a42f03d28/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_15:50:59_1fa2d89a9bb98a15e9720190e07d272a42f03d28/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-01_15:50:59_1fa2d89a9bb98a15e9720190e07d272a42f03d28/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-01_15:50:59_1fa2d89a9bb98a15e9720190e07d272a42f03d28/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index a8c13a0342aefd82451f15dd1a03aa9258d0db05..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_15:50:59_1fa2d89a9bb98a15e9720190e07d272a42f03d28/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.24243199999995,0.00623,161.0 diff --git a/raw_results/2023-09-01_15:50:59_1fa2d89a9bb98a15e9720190e07d272a42f03d28/pytorch_bert_inference/0/main.log b/raw_results/2023-09-01_15:50:59_1fa2d89a9bb98a15e9720190e07d272a42f03d28/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
e16c4bd9601da7d1ed79d9389683002e04f43b9a..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_15:50:59_1fa2d89a9bb98a15e9720190e07d272a42f03d28/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-01 16:53:31,138][benchmark][INFO] - Configuring inference benchmark -[2023-09-01 16:53:31,139][benchmark][INFO] - + Setting seed(42) -[2023-09-01 16:53:32,423][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-01 16:53:32,423][backend][INFO] - Configuring pytorch backend -[2023-09-01 16:53:32,423][backend][INFO] - + Checking initial device isolation -[2023-09-01 16:53:32,423][backend][INFO] - + Checking contineous device isolation -[2023-09-01 16:53:32,423][pytorch][INFO] - + Disabling gradients -[2023-09-01 16:53:32,424][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-01 16:53:33,081][pytorch][INFO] - + Turning on eval mode -[2023-09-01 16:53:33,082][inference][INFO] - Running inference benchmark -[2023-09-01 16:53:33,203][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-01 16:53:33,205][inference][INFO] - + Tracking forward pass peak memory -[2023-09-01 16:53:33,276][inference][INFO] - + Forward pass peak memory: 468.24243199999995 (MB) -[2023-09-01 16:53:33,277][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-01 16:53:33,280][inference][INFO] - + Warming up the forward pass -[2023-09-01 16:53:33,379][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-01 16:53:38,402][inference][INFO] - + Forward pass latency: 6.23e-03 (s) -[2023-09-01 16:53:38,404][inference][INFO] - + Forward pass throughput: 161.00 (samples/s) -[2023-09-01 16:53:38,404][inference][INFO] - Saving inference results -[2023-09-01 16:53:38,415][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-01_15:50:59_1fa2d89a9bb98a15e9720190e07d272a42f03d28/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-01_15:50:59_1fa2d89a9bb98a15e9720190e07d272a42f03d28/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_15:50:59_1fa2d89a9bb98a15e9720190e07d272a42f03d28/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-01_15:50:59_1fa2d89a9bb98a15e9720190e07d272a42f03d28/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-01_15:50:59_1fa2d89a9bb98a15e9720190e07d272a42f03d28/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 2ca1e6e82a2d786a8453c5b66d9246834e720d25..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_15:50:59_1fa2d89a9bb98a15e9720190e07d272a42f03d28/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
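
The sweeper block above (params: benchmark.input_shapes.batch_size: 1,4, mode: MULTIRUN) is what produces the numbered job subdirectories 0 and 1 seen throughout raw_results/: Hydra's BasicSweeper expands each comma-separated value into one job, and hydra.job.num becomes the output subdir. A simplified Python sketch of that expansion (the real logic is in hydra._internal.core_plugins.basic_sweeper):

    # Expand sweep params into per-job override lists, as in overrides.yaml.
    sweep_params = {"benchmark.input_shapes.batch_size": "1,4"}

    jobs = []
    for key, values in sweep_params.items():
        for num, value in enumerate(values.split(",")):
            jobs.append({"num": num, "override": f"{key}={value}"})

    print(jobs)
    # [{'num': 0, 'override': 'benchmark.input_shapes.batch_size=1'},
    #  {'num': 1, 'override': 'benchmark.input_shapes.batch_size=4'}]

This matches the per-job overrides.yaml files in this diff (batch_size=1 under subdir 0, batch_size=4 under subdir 1).
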
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-01_15:50:59_1fa2d89a9bb98a15e9720190e07d272a42f03d28/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-01_15:50:59_1fa2d89a9bb98a15e9720190e07d272a42f03d28/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-09-01_15:50:59_1fa2d89a9bb98a15e9720190e07d272a42f03d28/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_15:50:59_1fa2d89a9bb98a15e9720190e07d272a42f03d28/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-01_15:50:59_1fa2d89a9bb98a15e9720190e07d272a42f03d28/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-01_15:50:59_1fa2d89a9bb98a15e9720190e07d272a42f03d28/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_15:50:59_1fa2d89a9bb98a15e9720190e07d272a42f03d28/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-01_15:50:59_1fa2d89a9bb98a15e9720190e07d272a42f03d28/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-01_15:50:59_1fa2d89a9bb98a15e9720190e07d272a42f03d28/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index ccd5c294696a597038f4c81b2469784d638a55c2..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_15:50:59_1fa2d89a9bb98a15e9720190e07d272a42f03d28/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,469.27871999999996,0.0051,784.0 diff --git a/raw_results/2023-09-01_15:50:59_1fa2d89a9bb98a15e9720190e07d272a42f03d28/pytorch_bert_inference/1/main.log b/raw_results/2023-09-01_15:50:59_1fa2d89a9bb98a15e9720190e07d272a42f03d28/pytorch_bert_inference/1/main.log deleted file mode 100644 index fa48162c4f2ca1c99c32e4313da934478d2e8edc..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_15:50:59_1fa2d89a9bb98a15e9720190e07d272a42f03d28/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-01 16:53:39,044][benchmark][INFO] - Configuring inference benchmark -[2023-09-01 16:53:39,045][benchmark][INFO] - + Setting seed(42) -[2023-09-01 16:53:39,474][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-01 16:53:39,474][backend][INFO] - Configuring pytorch backend -[2023-09-01 16:53:39,474][backend][INFO] - + Checking initial device isolation -[2023-09-01 16:53:39,474][backend][INFO] - + Checking contineous device isolation -[2023-09-01 16:53:39,475][pytorch][INFO] - + Disabling gradients -[2023-09-01 16:53:39,475][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-01 16:53:39,603][pytorch][INFO] - + Turning on eval mode -[2023-09-01 16:53:39,604][inference][INFO] - Running inference benchmark -[2023-09-01 16:53:39,722][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-01 16:53:39,723][inference][INFO] - + Tracking forward 
pass peak memory -[2023-09-01 16:53:39,764][inference][INFO] - + Forward pass peak memory: 469.27871999999996 (MB) -[2023-09-01 16:53:39,765][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-01 16:53:39,767][inference][INFO] - + Warming up the forward pass -[2023-09-01 16:53:39,826][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-01 16:53:44,853][inference][INFO] - + Forward pass latency: 5.10e-03 (s) -[2023-09-01 16:53:44,854][inference][INFO] - + Forward pass throughput: 784.00 (samples/s) -[2023-09-01 16:53:44,854][inference][INFO] - Saving inference results -[2023-09-01 16:53:44,863][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-01_15:50:59_1fa2d89a9bb98a15e9720190e07d272a42f03d28/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-01_15:50:59_1fa2d89a9bb98a15e9720190e07d272a42f03d28/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_15:50:59_1fa2d89a9bb98a15e9720190e07d272a42f03d28/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-01_15:50:59_1fa2d89a9bb98a15e9720190e07d272a42f03d28/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-01_15:50:59_1fa2d89a9bb98a15e9720190e07d272a42f03d28/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 1b746e47980b5a226da77e295f9f707fd63b6bcd..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_15:50:59_1fa2d89a9bb98a15e9720190e07d272a42f03d28/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - 
launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-01_15:50:59_1fa2d89a9bb98a15e9720190e07d272a42f03d28/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-01_15:50:59_1fa2d89a9bb98a15e9720190e07d272a42f03d28/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-09-01_15:50:59_1fa2d89a9bb98a15e9720190e07d272a42f03d28/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_15:50:59_1fa2d89a9bb98a15e9720190e07d272a42f03d28/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-01_15:50:59_1fa2d89a9bb98a15e9720190e07d272a42f03d28/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-01_15:50:59_1fa2d89a9bb98a15e9720190e07d272a42f03d28/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_15:50:59_1fa2d89a9bb98a15e9720190e07d272a42f03d28/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-01_15:50:59_1fa2d89a9bb98a15e9720190e07d272a42f03d28/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-01_15:50:59_1fa2d89a9bb98a15e9720190e07d272a42f03d28/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 2788266feaf2fedd72ecad88fa5efc53337b3457..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_15:50:59_1fa2d89a9bb98a15e9720190e07d272a42f03d28/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.33196799999996,0.00378,265.0,0.544,184.0 diff --git a/raw_results/2023-09-01_15:50:59_1fa2d89a9bb98a15e9720190e07d272a42f03d28/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-01_15:50:59_1fa2d89a9bb98a15e9720190e07d272a42f03d28/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 978cae73a7f59ee8c8681e8203b976433d3b0cc4..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-09-01_15:50:59_1fa2d89a9bb98a15e9720190e07d272a42f03d28/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-01 16:53:49,572][benchmark][INFO] - Configuring inference benchmark -[2023-09-01 16:53:49,572][benchmark][INFO] - + Setting seed(42) -[2023-09-01 16:53:51,106][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-01 16:53:51,106][backend][INFO] - Configuring pytorch backend -[2023-09-01 16:53:51,107][backend][INFO] - + Checking initial device isolation -[2023-09-01 16:53:51,107][backend][INFO] - + Checking contineous device isolation -[2023-09-01 16:53:51,107][pytorch][INFO] - + Disabling gradients -[2023-09-01 16:53:51,107][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-01 16:53:51,735][pytorch][INFO] - + Turning on eval mode -[2023-09-01 16:53:51,736][inference][INFO] - Running inference benchmark -[2023-09-01 16:53:51,925][inference][INFO] - + Tracking forward pass peak memory -[2023-09-01 16:53:51,973][inference][INFO] - + Forward pass peak memory: 469.33196799999996 (MB) -[2023-09-01 16:53:51,974][inference][INFO] - + Warming up the forward pass -[2023-09-01 16:53:52,010][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-01 16:53:57,053][inference][INFO] - + Forward pass latency: 3.78e-03 (s) -[2023-09-01 16:53:57,055][inference][INFO] - + Forward pass throughput: 265.00 (samples/s) -[2023-09-01 16:53:57,056][inference][INFO] - + Warming up the generation pass -[2023-09-01 16:53:57,643][inference][INFO] - + Tracking generation latency and throughput -[2023-09-01 16:54:03,087][inference][INFO] - + Generation pass latency: 5.44e-01 (s) -[2023-09-01 16:54:03,089][inference][INFO] - + Generation pass throughput: 184.00 (tokens/s) -[2023-09-01 16:54:03,089][inference][INFO] - Saving inference results -[2023-09-01 16:54:03,101][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-01_18:27:01_3587769c08ffaf42c99f6882d4ad76d3a3669e5e/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-01_18:27:01_3587769c08ffaf42c99f6882d4ad76d3a3669e5e/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_18:27:01_3587769c08ffaf42c99f6882d4ad76d3a3669e5e/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-01_18:27:01_3587769c08ffaf42c99f6882d4ad76d3a3669e5e/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-01_18:27:01_3587769c08ffaf42c99f6882d4ad76d3a3669e5e/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 7da89e1c9a0f329bf26f909781d81473c1abcc8f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_18:27:01_3587769c08ffaf42c99f6882d4ad76d3a3669e5e/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
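
Note the difference between the two config snapshots in each run directory: .config/config.yaml stores disable_grad and eval_mode as the unresolved interpolation ${is_inference:${benchmark.name}}, while hydra_config.yaml stores the resolved value true. A minimal sketch of how such a custom OmegaConf resolver could behave, assuming it simply tests the benchmark name (the actual resolver is registered inside optimum_benchmark):

    from omegaconf import OmegaConf

    # Hypothetical reconstruction of the "is_inference" resolver.
    OmegaConf.register_new_resolver(
        "is_inference", lambda benchmark_name: benchmark_name == "inference"
    )

    cfg = OmegaConf.create({
        "benchmark": {"name": "inference"},
        "backend": {
            "disable_grad": "${is_inference:${benchmark.name}}",
            "eval_mode": "${is_inference:${benchmark.name}}",
        },
    })

    assert cfg.backend.disable_grad is True  # matches hydra_config.yaml above
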
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-01_18:27:01_3587769c08ffaf42c99f6882d4ad76d3a3669e5e/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-01_18:27:01_3587769c08ffaf42c99f6882d4ad76d3a3669e5e/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-01_18:27:01_3587769c08ffaf42c99f6882d4ad76d3a3669e5e/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_18:27:01_3587769c08ffaf42c99f6882d4ad76d3a3669e5e/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-01_18:27:01_3587769c08ffaf42c99f6882d4ad76d3a3669e5e/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-01_18:27:01_3587769c08ffaf42c99f6882d4ad76d3a3669e5e/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_18:27:01_3587769c08ffaf42c99f6882d4ad76d3a3669e5e/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-01_18:27:01_3587769c08ffaf42c99f6882d4ad76d3a3669e5e/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-01_18:27:01_3587769c08ffaf42c99f6882d4ad76d3a3669e5e/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 3eca690fe0926db1fbf930cf6ccaad158ebcf4c9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_18:27:01_3587769c08ffaf42c99f6882d4ad76d3a3669e5e/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.07859199999996,0.00697,143.0 diff --git a/raw_results/2023-09-01_18:27:01_3587769c08ffaf42c99f6882d4ad76d3a3669e5e/pytorch_bert_inference/0/main.log b/raw_results/2023-09-01_18:27:01_3587769c08ffaf42c99f6882d4ad76d3a3669e5e/pytorch_bert_inference/0/main.log deleted file mode 100644 index a637bf508f173227ed8a07c5df37fc1f5a23df30..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_18:27:01_3587769c08ffaf42c99f6882d4ad76d3a3669e5e/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-01 18:50:01,803][benchmark][INFO] - Configuring inference benchmark -[2023-09-01 18:50:01,804][benchmark][INFO] - + Setting seed(42) -[2023-09-01 18:50:03,275][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-01 18:50:03,275][backend][INFO] - Configuring pytorch backend -[2023-09-01 18:50:03,275][backend][INFO] - + Checking initial device isolation -[2023-09-01 18:50:03,276][backend][INFO] - + Checking contineous device isolation -[2023-09-01 18:50:03,276][pytorch][INFO] - + Disabling gradients -[2023-09-01 18:50:03,276][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-01 18:50:03,905][pytorch][INFO] - + Turning on eval mode -[2023-09-01 18:50:03,906][inference][INFO] - Running inference benchmark -[2023-09-01 18:50:04,031][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-01 18:50:04,033][inference][INFO] - + Tracking forward 
pass peak memory -[2023-09-01 18:50:04,097][inference][INFO] - + Forward pass peak memory: 468.07859199999996 (MB) -[2023-09-01 18:50:04,098][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-01 18:50:04,100][inference][INFO] - + Warming up the forward pass -[2023-09-01 18:50:04,176][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-01 18:50:09,202][inference][INFO] - + Forward pass latency: 6.97e-03 (s) -[2023-09-01 18:50:09,203][inference][INFO] - + Forward pass throughput: 143.00 (samples/s) -[2023-09-01 18:50:09,203][inference][INFO] - Saving inference results -[2023-09-01 18:50:09,216][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-01_18:27:01_3587769c08ffaf42c99f6882d4ad76d3a3669e5e/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-01_18:27:01_3587769c08ffaf42c99f6882d4ad76d3a3669e5e/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_18:27:01_3587769c08ffaf42c99f6882d4ad76d3a3669e5e/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-01_18:27:01_3587769c08ffaf42c99f6882d4ad76d3a3669e5e/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-01_18:27:01_3587769c08ffaf42c99f6882d4ad76d3a3669e5e/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 0556c5541a619864ab8afc0b1e78bfd2d94bbd41..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_18:27:01_3587769c08ffaf42c99f6882d4ad76d3a3669e5e/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} 
- launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-01_18:27:01_3587769c08ffaf42c99f6882d4ad76d3a3669e5e/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-09-01_18:27:01_3587769c08ffaf42c99f6882d4ad76d3a3669e5e/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-09-01_18:27:01_3587769c08ffaf42c99f6882d4ad76d3a3669e5e/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_18:27:01_3587769c08ffaf42c99f6882d4ad76d3a3669e5e/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-01_18:27:01_3587769c08ffaf42c99f6882d4ad76d3a3669e5e/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-01_18:27:01_3587769c08ffaf42c99f6882d4ad76d3a3669e5e/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_18:27:01_3587769c08ffaf42c99f6882d4ad76d3a3669e5e/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-01_18:27:01_3587769c08ffaf42c99f6882d4ad76d3a3669e5e/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-01_18:27:01_3587769c08ffaf42c99f6882d4ad76d3a3669e5e/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 8b2ecf70ff116a3ef1dd09559d8a5312ee689053..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_18:27:01_3587769c08ffaf42c99f6882d4ad76d3a3669e5e/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,469.078016,0.00624,641.0 diff --git a/raw_results/2023-09-01_18:27:01_3587769c08ffaf42c99f6882d4ad76d3a3669e5e/pytorch_bert_inference/1/main.log b/raw_results/2023-09-01_18:27:01_3587769c08ffaf42c99f6882d4ad76d3a3669e5e/pytorch_bert_inference/1/main.log deleted file mode 100644 index 
6caa6de33fff894d4b580412c24da0921debb561..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_18:27:01_3587769c08ffaf42c99f6882d4ad76d3a3669e5e/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-01 18:50:09,593][benchmark][INFO] - Configuring inference benchmark -[2023-09-01 18:50:09,595][benchmark][INFO] - + Setting seed(42) -[2023-09-01 18:50:10,033][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-01 18:50:10,033][backend][INFO] - Configuring pytorch backend -[2023-09-01 18:50:10,034][backend][INFO] - + Checking initial device isolation -[2023-09-01 18:50:10,034][backend][INFO] - + Checking contineous device isolation -[2023-09-01 18:50:10,034][pytorch][INFO] - + Disabling gradients -[2023-09-01 18:50:10,034][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-01 18:50:10,164][pytorch][INFO] - + Turning on eval mode -[2023-09-01 18:50:10,165][inference][INFO] - Running inference benchmark -[2023-09-01 18:50:10,292][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-01 18:50:10,294][inference][INFO] - + Tracking forward pass peak memory -[2023-09-01 18:50:10,339][inference][INFO] - + Forward pass peak memory: 469.078016 (MB) -[2023-09-01 18:50:10,340][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-01 18:50:10,342][inference][INFO] - + Warming up the forward pass -[2023-09-01 18:50:10,405][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-01 18:50:15,431][inference][INFO] - + Forward pass latency: 6.24e-03 (s) -[2023-09-01 18:50:15,432][inference][INFO] - + Forward pass throughput: 641.00 (samples/s) -[2023-09-01 18:50:15,432][inference][INFO] - Saving inference results -[2023-09-01 18:50:15,440][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-01_18:27:01_3587769c08ffaf42c99f6882d4ad76d3a3669e5e/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-01_18:27:01_3587769c08ffaf42c99f6882d4ad76d3a3669e5e/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_18:27:01_3587769c08ffaf42c99f6882d4ad76d3a3669e5e/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-01_18:27:01_3587769c08ffaf42c99f6882d4ad76d3a3669e5e/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-01_18:27:01_3587769c08ffaf42c99f6882d4ad76d3a3669e5e/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 9e84e0ad49f069937f463f60baf1b47b7ece3511..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_18:27:01_3587769c08ffaf42c99f6882d4ad76d3a3669e5e/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
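The config.yaml files in these runs store the unresolved Hydra composition (note disable_grad: ${is_inference:${benchmark.name}}), while each job's hydra_config.yaml stores the same tree with the resolvers already applied (disable_grad: true). A minimal inspection sketch in Python, assuming a local checkout of this raw_results tree and PyYAML installed; the path is one that appears in this diff:

import yaml

# hydra_config.yaml is fully resolved, so plain YAML parsing is enough; the
# unresolved config.yaml would additionally need OmegaConf plus the custom
# is_inference resolver (presumably registered by optimum_benchmark itself).
path = ("raw_results/2023-09-01_18:27:01_3587769c08ffaf42c99f6882d4ad76d3a3669e5e"
        "/pytorch_gpt2_inference/0/hydra_config.yaml")
with open(path) as f:
    cfg = yaml.safe_load(f)

print(cfg["benchmark"]["input_shapes"]["batch_size"])  # 1 for this job
print(cfg["backend"]["disable_grad"])                  # True, already resolved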
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-01_18:27:01_3587769c08ffaf42c99f6882d4ad76d3a3669e5e/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-01_18:27:01_3587769c08ffaf42c99f6882d4ad76d3a3669e5e/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-09-01_18:27:01_3587769c08ffaf42c99f6882d4ad76d3a3669e5e/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_18:27:01_3587769c08ffaf42c99f6882d4ad76d3a3669e5e/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-01_18:27:01_3587769c08ffaf42c99f6882d4ad76d3a3669e5e/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-01_18:27:01_3587769c08ffaf42c99f6882d4ad76d3a3669e5e/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_18:27:01_3587769c08ffaf42c99f6882d4ad76d3a3669e5e/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-01_18:27:01_3587769c08ffaf42c99f6882d4ad76d3a3669e5e/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-01_18:27:01_3587769c08ffaf42c99f6882d4ad76d3a3669e5e/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 920d766fdc8be0888eee1737d9c55de65dbe68d4..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_18:27:01_3587769c08ffaf42c99f6882d4ad76d3a3669e5e/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.7088,0.00374,267.0,0.503,199.0 diff --git a/raw_results/2023-09-01_18:27:01_3587769c08ffaf42c99f6882d4ad76d3a3669e5e/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-01_18:27:01_3587769c08ffaf42c99f6882d4ad76d3a3669e5e/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index eec8183ef2e3746c9a460b74af14b5cb3e08e165..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_18:27:01_3587769c08ffaf42c99f6882d4ad76d3a3669e5e/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-01 18:50:20,365][benchmark][INFO] - Configuring inference benchmark -[2023-09-01 18:50:20,367][benchmark][INFO] - + Setting seed(42) -[2023-09-01 18:50:21,867][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-01 18:50:21,868][backend][INFO] - Configuring pytorch backend -[2023-09-01 18:50:21,868][backend][INFO] - + Checking initial device isolation -[2023-09-01 18:50:21,868][backend][INFO] - + Checking contineous device isolation -[2023-09-01 18:50:21,868][pytorch][INFO] - + Disabling gradients -[2023-09-01 18:50:21,869][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-01 18:50:22,554][pytorch][INFO] - + Turning on eval mode -[2023-09-01 18:50:22,554][inference][INFO] - Running inference benchmark -[2023-09-01 18:50:22,758][inference][INFO] - + Tracking forward pass peak memory -[2023-09-01 18:50:22,805][inference][INFO] - + Forward pass peak memory: 469.7088 (MB) -[2023-09-01 18:50:22,806][inference][INFO] - + Warming up the forward pass 
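The gpt2 results row just above is consistent with throughput being derived from the measured latency and the configured shapes: for batch_size 1 and new_tokens 100, 1 / 0.00374 s ≈ 267 samples/s and 100 tokens / 0.503 s ≈ 199 tokens/s, matching the recorded columns. A small sanity-check sketch, with pandas assumed and the benchmark's exact rounding scheme an assumption:

import pandas as pd

path = ("raw_results/2023-09-01_18:27:01_3587769c08ffaf42c99f6882d4ad76d3a3669e5e"
        "/pytorch_gpt2_inference/0/inference_results.csv")
df = pd.read_csv(path, index_col=0)
batch_size, new_tokens = 1, 100  # from this job's hydra_config.yaml

# Recompute throughputs from latency and compare with the recorded columns.
fwd = (batch_size / df["forward.latency(s)"]).round()
gen = (batch_size * new_tokens / df["generate.latency(s)"]).round()
print(fwd.iloc[0], df["forward.throughput(samples/s)"].iloc[0])  # 267.0 267.0
print(gen.iloc[0], df["generate.throughput(tokens/s)"].iloc[0])  # 199.0 199.0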
-[2023-09-01 18:50:22,837][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-01 18:50:27,885][inference][INFO] - + Forward pass latency: 3.74e-03 (s) -[2023-09-01 18:50:27,887][inference][INFO] - + Forward pass throughput: 267.00 (samples/s) -[2023-09-01 18:50:27,888][inference][INFO] - + Warming up the generation pass -[2023-09-01 18:50:28,443][inference][INFO] - + Tracking generation latency and throughput -[2023-09-01 18:50:33,476][inference][INFO] - + Generation pass latency: 5.03e-01 (s) -[2023-09-01 18:50:33,477][inference][INFO] - + Generation pass throughput: 199.00 (tokens/s) -[2023-09-01 18:50:33,477][inference][INFO] - Saving inference results -[2023-09-01 18:50:33,490][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-01_18:40:40_a4dd53d88e4852f023332d284ff07a01afcd5681/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-01_18:40:40_a4dd53d88e4852f023332d284ff07a01afcd5681/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_18:40:40_a4dd53d88e4852f023332d284ff07a01afcd5681/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-01_18:40:40_a4dd53d88e4852f023332d284ff07a01afcd5681/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-01_18:40:40_a4dd53d88e4852f023332d284ff07a01afcd5681/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 0913ab36ed9bf348516ec61591858f6b8b390f8f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_18:40:40_a4dd53d88e4852f023332d284ff07a01afcd5681/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-01_18:40:40_a4dd53d88e4852f023332d284ff07a01afcd5681/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-09-01_18:40:40_a4dd53d88e4852f023332d284ff07a01afcd5681/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-01_18:40:40_a4dd53d88e4852f023332d284ff07a01afcd5681/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_18:40:40_a4dd53d88e4852f023332d284ff07a01afcd5681/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-01_18:40:40_a4dd53d88e4852f023332d284ff07a01afcd5681/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-01_18:40:40_a4dd53d88e4852f023332d284ff07a01afcd5681/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_18:40:40_a4dd53d88e4852f023332d284ff07a01afcd5681/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-01_18:40:40_a4dd53d88e4852f023332d284ff07a01afcd5681/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-01_18:40:40_a4dd53d88e4852f023332d284ff07a01afcd5681/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 50e4b855e5026694639fd3dfd5a9667969724f72..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_18:40:40_a4dd53d88e4852f023332d284ff07a01afcd5681/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,469.04115199999995,0.00698,143.0 diff --git a/raw_results/2023-09-01_18:40:40_a4dd53d88e4852f023332d284ff07a01afcd5681/pytorch_bert_inference/0/main.log b/raw_results/2023-09-01_18:40:40_a4dd53d88e4852f023332d284ff07a01afcd5681/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
4ce2275a4de72938ab4debc88eb38bccfafe7fa6..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_18:40:40_a4dd53d88e4852f023332d284ff07a01afcd5681/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-01 18:51:42,117][benchmark][INFO] - Configuring inference benchmark -[2023-09-01 18:51:42,117][benchmark][INFO] - + Setting seed(42) -[2023-09-01 18:51:43,423][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-01 18:51:43,424][backend][INFO] - Configuring pytorch backend -[2023-09-01 18:51:43,424][backend][INFO] - + Checking initial device isolation -[2023-09-01 18:51:43,424][backend][INFO] - + Checking contineous device isolation -[2023-09-01 18:51:43,424][pytorch][INFO] - + Disabling gradients -[2023-09-01 18:51:43,425][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-01 18:51:44,044][pytorch][INFO] - + Turning on eval mode -[2023-09-01 18:51:44,045][inference][INFO] - Running inference benchmark -[2023-09-01 18:51:44,164][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-01 18:51:44,165][inference][INFO] - + Tracking forward pass peak memory -[2023-09-01 18:51:44,232][inference][INFO] - + Forward pass peak memory: 469.04115199999995 (MB) -[2023-09-01 18:51:44,234][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-01 18:51:44,235][inference][INFO] - + Warming up the forward pass -[2023-09-01 18:51:44,310][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-01 18:51:49,335][inference][INFO] - + Forward pass latency: 6.98e-03 (s) -[2023-09-01 18:51:49,337][inference][INFO] - + Forward pass throughput: 143.00 (samples/s) -[2023-09-01 18:51:49,337][inference][INFO] - Saving inference results -[2023-09-01 18:51:49,348][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-01_18:40:40_a4dd53d88e4852f023332d284ff07a01afcd5681/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-01_18:40:40_a4dd53d88e4852f023332d284ff07a01afcd5681/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_18:40:40_a4dd53d88e4852f023332d284ff07a01afcd5681/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-01_18:40:40_a4dd53d88e4852f023332d284ff07a01afcd5681/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-01_18:40:40_a4dd53d88e4852f023332d284ff07a01afcd5681/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 59106b8d5eb31a0c3578a764fea9850f3ec3c314..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_18:40:40_a4dd53d88e4852f023332d284ff07a01afcd5681/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
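In the bert sweeps above, Hydra's BasicSweeper expands benchmark.input_shapes.batch_size: 1,4 into two multirun jobs (hydra.job.num 0 and 1) under sweeps/${COMMIT_DATE_GMT}_${COMMIT_SHA}/pytorch_bert_inference/, and each job's .config/overrides.yaml records the single value it received. A sketch that recovers the job-to-batch-size mapping from those files (the glob pattern mirrors the paths in this diff; PyYAML assumed):

import glob
import yaml

# Each overrides.yaml holds a list like ['benchmark.input_shapes.batch_size=4'].
pattern = "raw_results/*/pytorch_bert_inference/*/.config/overrides.yaml"
for path in sorted(glob.glob(pattern)):
    with open(path) as f:
        overrides = yaml.safe_load(f)
    print(path, "->", overrides)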
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-01_18:40:40_a4dd53d88e4852f023332d284ff07a01afcd5681/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-01_18:40:40_a4dd53d88e4852f023332d284ff07a01afcd5681/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-09-01_18:40:40_a4dd53d88e4852f023332d284ff07a01afcd5681/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_18:40:40_a4dd53d88e4852f023332d284ff07a01afcd5681/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-01_18:40:40_a4dd53d88e4852f023332d284ff07a01afcd5681/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-01_18:40:40_a4dd53d88e4852f023332d284ff07a01afcd5681/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_18:40:40_a4dd53d88e4852f023332d284ff07a01afcd5681/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-01_18:40:40_a4dd53d88e4852f023332d284ff07a01afcd5681/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-01_18:40:40_a4dd53d88e4852f023332d284ff07a01afcd5681/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 2542ddb2e2c0bb1c6cff5ecd474bb4d01aa603d4..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_18:40:40_a4dd53d88e4852f023332d284ff07a01afcd5681/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,470.02419199999997,0.00614,651.0 diff --git a/raw_results/2023-09-01_18:40:40_a4dd53d88e4852f023332d284ff07a01afcd5681/pytorch_bert_inference/1/main.log b/raw_results/2023-09-01_18:40:40_a4dd53d88e4852f023332d284ff07a01afcd5681/pytorch_bert_inference/1/main.log deleted file mode 100644 index 87c58c87dc520fd12ab7ee7fbf44f019aa3c4665..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_18:40:40_a4dd53d88e4852f023332d284ff07a01afcd5681/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-01 18:51:49,747][benchmark][INFO] - Configuring inference benchmark -[2023-09-01 18:51:49,748][benchmark][INFO] - + Setting seed(42) -[2023-09-01 18:51:50,205][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-01 18:51:50,205][backend][INFO] - Configuring pytorch backend -[2023-09-01 18:51:50,205][backend][INFO] - + Checking initial device isolation -[2023-09-01 18:51:50,205][backend][INFO] - + Checking contineous device isolation -[2023-09-01 18:51:50,205][pytorch][INFO] - + Disabling gradients -[2023-09-01 18:51:50,205][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-01 18:51:50,321][pytorch][INFO] - + Turning on eval mode -[2023-09-01 18:51:50,322][inference][INFO] - Running inference benchmark -[2023-09-01 18:51:50,444][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-01 18:51:50,445][inference][INFO] - + Tracking forward 
pass peak memory -[2023-09-01 18:51:50,493][inference][INFO] - + Forward pass peak memory: 470.02419199999997 (MB) -[2023-09-01 18:51:50,494][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-01 18:51:50,496][inference][INFO] - + Warming up the forward pass -[2023-09-01 18:51:50,559][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-01 18:51:55,586][inference][INFO] - + Forward pass latency: 6.14e-03 (s) -[2023-09-01 18:51:55,587][inference][INFO] - + Forward pass throughput: 651.00 (samples/s) -[2023-09-01 18:51:55,587][inference][INFO] - Saving inference results -[2023-09-01 18:51:55,595][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-01_18:40:40_a4dd53d88e4852f023332d284ff07a01afcd5681/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-01_18:40:40_a4dd53d88e4852f023332d284ff07a01afcd5681/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_18:40:40_a4dd53d88e4852f023332d284ff07a01afcd5681/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-01_18:40:40_a4dd53d88e4852f023332d284ff07a01afcd5681/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-01_18:40:40_a4dd53d88e4852f023332d284ff07a01afcd5681/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index a1ac90b833f3314f83e938ea3c8e815de2984053..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_18:40:40_a4dd53d88e4852f023332d284ff07a01afcd5681/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - 
launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-01_18:40:40_a4dd53d88e4852f023332d284ff07a01afcd5681/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-01_18:40:40_a4dd53d88e4852f023332d284ff07a01afcd5681/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-09-01_18:40:40_a4dd53d88e4852f023332d284ff07a01afcd5681/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_18:40:40_a4dd53d88e4852f023332d284ff07a01afcd5681/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-01_18:40:40_a4dd53d88e4852f023332d284ff07a01afcd5681/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-01_18:40:40_a4dd53d88e4852f023332d284ff07a01afcd5681/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_18:40:40_a4dd53d88e4852f023332d284ff07a01afcd5681/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-01_18:40:40_a4dd53d88e4852f023332d284ff07a01afcd5681/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-01_18:40:40_a4dd53d88e4852f023332d284ff07a01afcd5681/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 7af1fefde26b6f236ed2ee6d24e73895099e718c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_18:40:40_a4dd53d88e4852f023332d284ff07a01afcd5681/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.27462399999996,0.00369,271.0,0.556,180.0 diff --git a/raw_results/2023-09-01_18:40:40_a4dd53d88e4852f023332d284ff07a01afcd5681/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-01_18:40:40_a4dd53d88e4852f023332d284ff07a01afcd5681/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 4887e1f7ef4a79e43fc51d275dab28ab088b8c8c..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-09-01_18:40:40_a4dd53d88e4852f023332d284ff07a01afcd5681/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-01 18:52:00,388][benchmark][INFO] - Configuring inference benchmark -[2023-09-01 18:52:00,390][benchmark][INFO] - + Setting seed(42) -[2023-09-01 18:52:01,941][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-01 18:52:01,941][backend][INFO] - Configuring pytorch backend -[2023-09-01 18:52:01,941][backend][INFO] - + Checking initial device isolation -[2023-09-01 18:52:01,942][backend][INFO] - + Checking contineous device isolation -[2023-09-01 18:52:01,942][pytorch][INFO] - + Disabling gradients -[2023-09-01 18:52:01,942][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-01 18:52:02,602][pytorch][INFO] - + Turning on eval mode -[2023-09-01 18:52:02,602][inference][INFO] - Running inference benchmark -[2023-09-01 18:52:02,798][inference][INFO] - + Tracking forward pass peak memory -[2023-09-01 18:52:02,846][inference][INFO] - + Forward pass peak memory: 469.27462399999996 (MB) -[2023-09-01 18:52:02,848][inference][INFO] - + Warming up the forward pass -[2023-09-01 18:52:02,879][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-01 18:52:07,925][inference][INFO] - + Forward pass latency: 3.69e-03 (s) -[2023-09-01 18:52:07,926][inference][INFO] - + Forward pass throughput: 271.00 (samples/s) -[2023-09-01 18:52:07,927][inference][INFO] - + Warming up the generation pass -[2023-09-01 18:52:08,517][inference][INFO] - + Tracking generation latency and throughput -[2023-09-01 18:52:13,519][inference][INFO] - + Generation pass latency: 5.56e-01 (s) -[2023-09-01 18:52:13,520][inference][INFO] - + Generation pass throughput: 180.00 (tokens/s) -[2023-09-01 18:52:13,520][inference][INFO] - Saving inference results -[2023-09-01 18:52:13,532][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-01_19:27:01_0afa5071bd84e44301750fdc594e33db102cf374/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-01_19:27:01_0afa5071bd84e44301750fdc594e33db102cf374/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_19:27:01_0afa5071bd84e44301750fdc594e33db102cf374/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-01_19:27:01_0afa5071bd84e44301750fdc594e33db102cf374/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-01_19:27:01_0afa5071bd84e44301750fdc594e33db102cf374/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index cee1466518dce4600d5dd545f156a1b128c7961a..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_19:27:01_0afa5071bd84e44301750fdc594e33db102cf374/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
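Since every commit snapshot uses the same layout (raw_results/<date>_<sha>/<experiment>/<job>/inference_results.csv), per-commit histories can be rebuilt by concatenating the CSVs; the batch-size-1 bert forward latency, for instance, moves from 6.98e-03 s under commit a4dd53d to 6.84e-03 s under commit 0afa507 within this section. A hedged pandas sketch under that layout assumption:

import glob
import pandas as pd

frames = []
for path in sorted(glob.glob("raw_results/*/pytorch_bert_inference/0/inference_results.csv")):
    df = pd.read_csv(path, index_col=0)
    df["commit"] = path.split("/")[1]  # the "<date>_<sha>" directory name
    frames.append(df)

history = pd.concat(frames, ignore_index=True)
print(history[["commit", "forward.latency(s)", "forward.throughput(samples/s)"]])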
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-01_19:27:01_0afa5071bd84e44301750fdc594e33db102cf374/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-01_19:27:01_0afa5071bd84e44301750fdc594e33db102cf374/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-01_19:27:01_0afa5071bd84e44301750fdc594e33db102cf374/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_19:27:01_0afa5071bd84e44301750fdc594e33db102cf374/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-01_19:27:01_0afa5071bd84e44301750fdc594e33db102cf374/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-01_19:27:01_0afa5071bd84e44301750fdc594e33db102cf374/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_19:27:01_0afa5071bd84e44301750fdc594e33db102cf374/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-01_19:27:01_0afa5071bd84e44301750fdc594e33db102cf374/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-01_19:27:01_0afa5071bd84e44301750fdc594e33db102cf374/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 30da8a9c1c1de32a11396a7621c89c17af425a9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_19:27:01_0afa5071bd84e44301750fdc594e33db102cf374/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.611072,0.00684,146.0 diff --git a/raw_results/2023-09-01_19:27:01_0afa5071bd84e44301750fdc594e33db102cf374/pytorch_bert_inference/0/main.log b/raw_results/2023-09-01_19:27:01_0afa5071bd84e44301750fdc594e33db102cf374/pytorch_bert_inference/0/main.log deleted file mode 100644 index c035c424cb62f0cf96c3da42c976fa6fe9d4a537..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_19:27:01_0afa5071bd84e44301750fdc594e33db102cf374/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-01 20:50:05,703][benchmark][INFO] - Configuring inference benchmark -[2023-09-01 20:50:05,704][benchmark][INFO] - + Setting seed(42) -[2023-09-01 20:50:06,947][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-01 20:50:06,948][backend][INFO] - Configuring pytorch backend -[2023-09-01 20:50:06,948][backend][INFO] - + Checking initial device isolation -[2023-09-01 20:50:06,948][backend][INFO] - + Checking contineous device isolation -[2023-09-01 20:50:06,948][pytorch][INFO] - + Disabling gradients -[2023-09-01 20:50:06,949][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-01 20:50:07,568][pytorch][INFO] - + Turning on eval mode -[2023-09-01 20:50:07,568][inference][INFO] - Running inference benchmark -[2023-09-01 20:50:07,697][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-01 20:50:07,698][inference][INFO] - + Tracking forward pass peak 
memory -[2023-09-01 20:50:07,765][inference][INFO] - + Forward pass peak memory: 468.611072 (MB) -[2023-09-01 20:50:07,767][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-01 20:50:07,768][inference][INFO] - + Warming up the forward pass -[2023-09-01 20:50:07,828][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-01 20:50:12,852][inference][INFO] - + Forward pass latency: 6.84e-03 (s) -[2023-09-01 20:50:12,854][inference][INFO] - + Forward pass throughput: 146.00 (samples/s) -[2023-09-01 20:50:12,854][inference][INFO] - Saving inference results -[2023-09-01 20:50:12,866][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-01_19:27:01_0afa5071bd84e44301750fdc594e33db102cf374/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-01_19:27:01_0afa5071bd84e44301750fdc594e33db102cf374/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_19:27:01_0afa5071bd84e44301750fdc594e33db102cf374/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-01_19:27:01_0afa5071bd84e44301750fdc594e33db102cf374/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-01_19:27:01_0afa5071bd84e44301750fdc594e33db102cf374/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 442b06e7431f7de60e3ebf7392e79947c13a37da..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_19:27:01_0afa5071bd84e44301750fdc594e33db102cf374/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-01_19:27:01_0afa5071bd84e44301750fdc594e33db102cf374/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-01_19:27:01_0afa5071bd84e44301750fdc594e33db102cf374/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-09-01_19:27:01_0afa5071bd84e44301750fdc594e33db102cf374/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_19:27:01_0afa5071bd84e44301750fdc594e33db102cf374/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-01_19:27:01_0afa5071bd84e44301750fdc594e33db102cf374/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-01_19:27:01_0afa5071bd84e44301750fdc594e33db102cf374/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_19:27:01_0afa5071bd84e44301750fdc594e33db102cf374/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-01_19:27:01_0afa5071bd84e44301750fdc594e33db102cf374/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-01_19:27:01_0afa5071bd84e44301750fdc594e33db102cf374/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index a8ce26dce19c6a5d3fff1c6809b47a0977fa3ec2..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_19:27:01_0afa5071bd84e44301750fdc594e33db102cf374/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,469.62278399999997,0.00635,630.0 diff --git a/raw_results/2023-09-01_19:27:01_0afa5071bd84e44301750fdc594e33db102cf374/pytorch_bert_inference/1/main.log b/raw_results/2023-09-01_19:27:01_0afa5071bd84e44301750fdc594e33db102cf374/pytorch_bert_inference/1/main.log deleted file mode 100644 index 859a40a34cd924235e6413a229bfa4adf177c886..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-09-01_19:27:01_0afa5071bd84e44301750fdc594e33db102cf374/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-01 20:50:13,241][benchmark][INFO] - Configuring inference benchmark -[2023-09-01 20:50:13,241][benchmark][INFO] - + Setting seed(42) -[2023-09-01 20:50:13,705][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-01 20:50:13,706][backend][INFO] - Configuring pytorch backend -[2023-09-01 20:50:13,706][backend][INFO] - + Checking initial device isolation -[2023-09-01 20:50:13,706][backend][INFO] - + Checking contineous device isolation -[2023-09-01 20:50:13,706][pytorch][INFO] - + Disabling gradients -[2023-09-01 20:50:13,707][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-01 20:50:13,825][pytorch][INFO] - + Turning on eval mode -[2023-09-01 20:50:13,825][inference][INFO] - Running inference benchmark -[2023-09-01 20:50:13,949][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-01 20:50:13,950][inference][INFO] - + Tracking forward pass peak memory -[2023-09-01 20:50:14,000][inference][INFO] - + Forward pass peak memory: 469.62278399999997 (MB) -[2023-09-01 20:50:14,001][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-01 20:50:14,003][inference][INFO] - + Warming up the forward pass -[2023-09-01 20:50:14,066][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-01 20:50:19,090][inference][INFO] - + Forward pass latency: 6.35e-03 (s) -[2023-09-01 20:50:19,091][inference][INFO] - + Forward pass throughput: 630.00 (samples/s) -[2023-09-01 20:50:19,091][inference][INFO] - Saving inference results -[2023-09-01 20:50:19,099][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-01_19:27:01_0afa5071bd84e44301750fdc594e33db102cf374/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-01_19:27:01_0afa5071bd84e44301750fdc594e33db102cf374/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_19:27:01_0afa5071bd84e44301750fdc594e33db102cf374/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference 
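The forward-pass numbers recorded in the logs and CSVs above are internally consistent: reported throughput is simply batch size divided by mean latency. A minimal sanity check, using only values copied from the two BERT runs above (this script is illustrative and not part of the benchmark suite itself):

# Sanity check: forward.throughput(samples/s) == batch_size / forward.latency(s).
# Values copied from the inference_results.csv files above.
runs = [
    ("pytorch_bert_inference/0", 1, 0.00684, 146.0),  # batch_size 1
    ("pytorch_bert_inference/1", 4, 0.00635, 630.0),  # batch_size 4
]
for name, batch_size, latency_s, reported in runs:
    derived = batch_size / latency_s
    print(f"{name}: derived {derived:.1f} samples/s, reported {reported}")
    assert abs(derived - reported) < 1.0  # throughput is rounded in the CSVs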
-model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-01_19:27:01_0afa5071bd84e44301750fdc594e33db102cf374/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-01_19:27:01_0afa5071bd84e44301750fdc594e33db102cf374/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index d587f9cce23a7851efe6c2c10ba6bf4397e4c9a7..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_19:27:01_0afa5071bd84e44301750fdc594e33db102cf374/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
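The run.dir and sweep.dir patterns at the top of the hydra.yaml above also explain the commit-stamped directory names used throughout raw_results/: Hydra resolves the built-in ${oc.env:...} interpolation against the launcher's environment. A small OmegaConf-only sketch of that resolution; the two environment values are copied from the result paths above purely for illustration (in the real runs they are presumably exported by the benchmarking job before Hydra composes the config):

import os
from omegaconf import OmegaConf

# Stand-in values, copied from the raw_results paths above.
os.environ["COMMIT_DATE_GMT"] = "2023-09-01_19:27:01"
os.environ["COMMIT_SHA"] = "0afa5071bd84e44301750fdc594e33db102cf374"

cfg = OmegaConf.create(
    {
        "experiment_name": "pytorch_gpt2_inference",
        "sweep_dir": "sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}",
    }
)
print(cfg.sweep_dir)
# sweeps/2023-09-01_19:27:01_0afa5071bd84e44301750fdc594e33db102cf374/pytorch_gpt2_inference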
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-01_19:27:01_0afa5071bd84e44301750fdc594e33db102cf374/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-01_19:27:01_0afa5071bd84e44301750fdc594e33db102cf374/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-09-01_19:27:01_0afa5071bd84e44301750fdc594e33db102cf374/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_19:27:01_0afa5071bd84e44301750fdc594e33db102cf374/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-01_19:27:01_0afa5071bd84e44301750fdc594e33db102cf374/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-01_19:27:01_0afa5071bd84e44301750fdc594e33db102cf374/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_19:27:01_0afa5071bd84e44301750fdc594e33db102cf374/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-01_19:27:01_0afa5071bd84e44301750fdc594e33db102cf374/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-01_19:27:01_0afa5071bd84e44301750fdc594e33db102cf374/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 10ae20a90096332949633d4ce4ecfe773ab71bc9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_19:27:01_0afa5071bd84e44301750fdc594e33db102cf374/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.143552,0.00382,262.0,0.496,202.0 diff --git a/raw_results/2023-09-01_19:27:01_0afa5071bd84e44301750fdc594e33db102cf374/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-01_19:27:01_0afa5071bd84e44301750fdc594e33db102cf374/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 666e7fbb65167bb46b4f090c3c8b7b03f99b81ed..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-01_19:27:01_0afa5071bd84e44301750fdc594e33db102cf374/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-01 20:50:23,961][benchmark][INFO] - Configuring inference benchmark -[2023-09-01 20:50:23,963][benchmark][INFO] - + Setting seed(42) -[2023-09-01 20:50:25,345][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-01 20:50:25,346][backend][INFO] - Configuring pytorch backend -[2023-09-01 20:50:25,346][backend][INFO] - + Checking initial device isolation -[2023-09-01 20:50:25,346][backend][INFO] - + Checking contineous device isolation -[2023-09-01 20:50:25,346][pytorch][INFO] - + Disabling gradients -[2023-09-01 20:50:25,346][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-01 20:50:25,980][pytorch][INFO] - + Turning on eval mode -[2023-09-01 20:50:25,980][inference][INFO] - Running inference benchmark -[2023-09-01 20:50:26,174][inference][INFO] - + Tracking forward pass peak memory -[2023-09-01 20:50:26,222][inference][INFO] - + Forward pass peak memory: 469.143552 (MB) -[2023-09-01 20:50:26,224][inference][INFO] - + Warming up the forward pass 
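The generation metrics in the gpt2 CSV above follow the same arithmetic, with new_tokens: 100 coming from the benchmark config: generation throughput in tokens/s is batch_size * new_tokens / generate.latency(s), i.e. 1 * 100 / 0.496 ≈ 202 tokens/s, matching the reported value. As a one-line check with values copied from above:

# generate.throughput(tokens/s) == batch_size * new_tokens / generate.latency(s)
batch_size, new_tokens, gen_latency_s = 1, 100, 0.496  # from the gpt2 run above
print(round(batch_size * new_tokens / gen_latency_s))  # -> 202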
-[2023-09-01 20:50:26,256][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-01 20:50:31,301][inference][INFO] - + Forward pass latency: 3.82e-03 (s) -[2023-09-01 20:50:31,303][inference][INFO] - + Forward pass throughput: 262.00 (samples/s) -[2023-09-01 20:50:31,304][inference][INFO] - + Warming up the generation pass -[2023-09-01 20:50:31,908][inference][INFO] - + Tracking generation latency and throughput -[2023-09-01 20:50:37,367][inference][INFO] - + Generation pass latency: 4.96e-01 (s) -[2023-09-01 20:50:37,368][inference][INFO] - + Generation pass throughput: 202.00 (tokens/s) -[2023-09-01 20:50:37,368][inference][INFO] - Saving inference results -[2023-09-01 20:50:37,381][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-04_07:06:11_ab8cba824e3887d90cb9f4d5866fde9243f2c9fe/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-04_07:06:11_ab8cba824e3887d90cb9f4d5866fde9243f2c9fe/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_07:06:11_ab8cba824e3887d90cb9f4d5866fde9243f2c9fe/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_07:06:11_ab8cba824e3887d90cb9f4d5866fde9243f2c9fe/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-04_07:06:11_ab8cba824e3887d90cb9f4d5866fde9243f2c9fe/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 5864d2c5862068ae86f56c65f9a938a906d62b7c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_07:06:11_ab8cba824e3887d90cb9f4d5866fde9243f2c9fe/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-04_07:06:11_ab8cba824e3887d90cb9f4d5866fde9243f2c9fe/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-09-04_07:06:11_ab8cba824e3887d90cb9f4d5866fde9243f2c9fe/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-04_07:06:11_ab8cba824e3887d90cb9f4d5866fde9243f2c9fe/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_07:06:11_ab8cba824e3887d90cb9f4d5866fde9243f2c9fe/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-04_07:06:11_ab8cba824e3887d90cb9f4d5866fde9243f2c9fe/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-04_07:06:11_ab8cba824e3887d90cb9f4d5866fde9243f2c9fe/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_07:06:11_ab8cba824e3887d90cb9f4d5866fde9243f2c9fe/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_07:06:11_ab8cba824e3887d90cb9f4d5866fde9243f2c9fe/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-04_07:06:11_ab8cba824e3887d90cb9f4d5866fde9243f2c9fe/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 45bcf20a2d71309b705dd261347ea8e829f1e4a3..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_07:06:11_ab8cba824e3887d90cb9f4d5866fde9243f2c9fe/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.62336,0.00698,143.0 diff --git a/raw_results/2023-09-04_07:06:11_ab8cba824e3887d90cb9f4d5866fde9243f2c9fe/pytorch_bert_inference/0/main.log b/raw_results/2023-09-04_07:06:11_ab8cba824e3887d90cb9f4d5866fde9243f2c9fe/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
9da7b4959290ed2b07320672cc002ac5bc4142ce..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_07:06:11_ab8cba824e3887d90cb9f4d5866fde9243f2c9fe/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-04 08:50:11,569][benchmark][INFO] - Configuring inference benchmark -[2023-09-04 08:50:11,570][benchmark][INFO] - + Setting seed(42) -[2023-09-04 08:50:12,856][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-04 08:50:12,856][backend][INFO] - Configuring pytorch backend -[2023-09-04 08:50:12,856][backend][INFO] - + Checking initial device isolation -[2023-09-04 08:50:12,856][backend][INFO] - + Checking contineous device isolation -[2023-09-04 08:50:12,856][pytorch][INFO] - + Disabling gradients -[2023-09-04 08:50:12,857][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-04 08:50:13,481][pytorch][INFO] - + Turning on eval mode -[2023-09-04 08:50:13,482][inference][INFO] - Running inference benchmark -[2023-09-04 08:50:13,598][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 08:50:13,600][inference][INFO] - + Tracking forward pass peak memory -[2023-09-04 08:50:13,662][inference][INFO] - + Forward pass peak memory: 468.62336 (MB) -[2023-09-04 08:50:13,663][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 08:50:13,665][inference][INFO] - + Warming up the forward pass -[2023-09-04 08:50:13,724][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-04 08:50:18,745][inference][INFO] - + Forward pass latency: 6.98e-03 (s) -[2023-09-04 08:50:18,746][inference][INFO] - + Forward pass throughput: 143.00 (samples/s) -[2023-09-04 08:50:18,746][inference][INFO] - Saving inference results -[2023-09-04 08:50:18,756][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-04_07:06:11_ab8cba824e3887d90cb9f4d5866fde9243f2c9fe/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-04_07:06:11_ab8cba824e3887d90cb9f4d5866fde9243f2c9fe/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_07:06:11_ab8cba824e3887d90cb9f4d5866fde9243f2c9fe/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_07:06:11_ab8cba824e3887d90cb9f4d5866fde9243f2c9fe/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-04_07:06:11_ab8cba824e3887d90cb9f4d5866fde9243f2c9fe/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index c454d1f7e5ef5de342d09dac31b1e8d9b6d0ef50..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_07:06:11_ab8cba824e3887d90cb9f4d5866fde9243f2c9fe/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
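The sweeper block in the hydra.yaml above (params: benchmark.input_shapes.batch_size: 1,4) is what produces the numbered 0/ and 1/ job directories for each BERT experiment: Hydra's BasicSweeper expands the comma-separated list into one job per value, and ${hydra.job.num} becomes the output subdir. A simplified Python sketch of that expansion; note the real sweeper also takes cross-products over multiple params, and "main.py" in the comment stands in for the recorded job name "main" rather than a documented entry point:

# Equivalent CLI launch (hypothetical invocation):
#   python main.py --multirun benchmark.input_shapes.batch_size=1,4
sweep_params = {"benchmark.input_shapes.batch_size": "1,4"}  # from hydra.yaml above
for key, values in sweep_params.items():
    for job_num, value in enumerate(values.split(",")):
        # job 0 -> batch_size=1, job 1 -> batch_size=4, matching the overrides.yaml files
        print(f"job {job_num}: {key}={value} -> output subdir {job_num}/")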
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-04_07:06:11_ab8cba824e3887d90cb9f4d5866fde9243f2c9fe/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-04_07:06:11_ab8cba824e3887d90cb9f4d5866fde9243f2c9fe/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-09-04_07:06:11_ab8cba824e3887d90cb9f4d5866fde9243f2c9fe/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_07:06:11_ab8cba824e3887d90cb9f4d5866fde9243f2c9fe/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-04_07:06:11_ab8cba824e3887d90cb9f4d5866fde9243f2c9fe/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-04_07:06:11_ab8cba824e3887d90cb9f4d5866fde9243f2c9fe/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_07:06:11_ab8cba824e3887d90cb9f4d5866fde9243f2c9fe/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_07:06:11_ab8cba824e3887d90cb9f4d5866fde9243f2c9fe/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-04_07:06:11_ab8cba824e3887d90cb9f4d5866fde9243f2c9fe/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 27a8a370e25d6e9dfa011154864f426df70b4d88..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_07:06:11_ab8cba824e3887d90cb9f4d5866fde9243f2c9fe/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,469.635072,0.00671,596.0 diff --git a/raw_results/2023-09-04_07:06:11_ab8cba824e3887d90cb9f4d5866fde9243f2c9fe/pytorch_bert_inference/1/main.log b/raw_results/2023-09-04_07:06:11_ab8cba824e3887d90cb9f4d5866fde9243f2c9fe/pytorch_bert_inference/1/main.log deleted file mode 100644 index 82a83facb28e1fa91d5434fd64ea745eb23415cf..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_07:06:11_ab8cba824e3887d90cb9f4d5866fde9243f2c9fe/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-04 08:50:19,130][benchmark][INFO] - Configuring inference benchmark -[2023-09-04 08:50:19,131][benchmark][INFO] - + Setting seed(42) -[2023-09-04 08:50:19,617][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-04 08:50:19,617][backend][INFO] - Configuring pytorch backend -[2023-09-04 08:50:19,618][backend][INFO] - + Checking initial device isolation -[2023-09-04 08:50:19,618][backend][INFO] - + Checking contineous device isolation -[2023-09-04 08:50:19,618][pytorch][INFO] - + Disabling gradients -[2023-09-04 08:50:19,618][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-04 08:50:19,840][pytorch][INFO] - + Turning on eval mode -[2023-09-04 08:50:19,841][inference][INFO] - Running inference benchmark -[2023-09-04 08:50:19,963][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 08:50:19,964][inference][INFO] - + Tracking forward pass peak 
memory -[2023-09-04 08:50:20,007][inference][INFO] - + Forward pass peak memory: 469.635072 (MB) -[2023-09-04 08:50:20,008][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 08:50:20,010][inference][INFO] - + Warming up the forward pass -[2023-09-04 08:50:20,072][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-04 08:50:25,094][inference][INFO] - + Forward pass latency: 6.71e-03 (s) -[2023-09-04 08:50:25,095][inference][INFO] - + Forward pass throughput: 596.00 (samples/s) -[2023-09-04 08:50:25,096][inference][INFO] - Saving inference results -[2023-09-04 08:50:25,102][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-04_07:06:11_ab8cba824e3887d90cb9f4d5866fde9243f2c9fe/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-04_07:06:11_ab8cba824e3887d90cb9f4d5866fde9243f2c9fe/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_07:06:11_ab8cba824e3887d90cb9f4d5866fde9243f2c9fe/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_07:06:11_ab8cba824e3887d90cb9f4d5866fde9243f2c9fe/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-04_07:06:11_ab8cba824e3887d90cb9f4d5866fde9243f2c9fe/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 17ad369c7ef4ed8982de202cf39938ebc6def743..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_07:06:11_ab8cba824e3887d90cb9f4d5866fde9243f2c9fe/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-04_07:06:11_ab8cba824e3887d90cb9f4d5866fde9243f2c9fe/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-04_07:06:11_ab8cba824e3887d90cb9f4d5866fde9243f2c9fe/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-09-04_07:06:11_ab8cba824e3887d90cb9f4d5866fde9243f2c9fe/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_07:06:11_ab8cba824e3887d90cb9f4d5866fde9243f2c9fe/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-04_07:06:11_ab8cba824e3887d90cb9f4d5866fde9243f2c9fe/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-04_07:06:11_ab8cba824e3887d90cb9f4d5866fde9243f2c9fe/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_07:06:11_ab8cba824e3887d90cb9f4d5866fde9243f2c9fe/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_07:06:11_ab8cba824e3887d90cb9f4d5866fde9243f2c9fe/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-04_07:06:11_ab8cba824e3887d90cb9f4d5866fde9243f2c9fe/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index aaf2d362f6b05cf473e7a3956d0468ac998daac1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_07:06:11_ab8cba824e3887d90cb9f4d5866fde9243f2c9fe/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.303296,0.00501,200.0,0.494,202.0 diff --git a/raw_results/2023-09-04_07:06:11_ab8cba824e3887d90cb9f4d5866fde9243f2c9fe/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-04_07:06:11_ab8cba824e3887d90cb9f4d5866fde9243f2c9fe/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 267c96d8892be50ab0af0696921f721dc66783dc..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-09-04_07:06:11_ab8cba824e3887d90cb9f4d5866fde9243f2c9fe/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-04 08:50:30,081][benchmark][INFO] - Configuring inference benchmark -[2023-09-04 08:50:30,082][benchmark][INFO] - + Setting seed(42) -[2023-09-04 08:50:31,672][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-04 08:50:31,672][backend][INFO] - Configuring pytorch backend -[2023-09-04 08:50:31,672][backend][INFO] - + Checking initial device isolation -[2023-09-04 08:50:31,672][backend][INFO] - + Checking contineous device isolation -[2023-09-04 08:50:31,672][pytorch][INFO] - + Disabling gradients -[2023-09-04 08:50:31,673][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-04 08:50:32,327][pytorch][INFO] - + Turning on eval mode -[2023-09-04 08:50:32,327][inference][INFO] - Running inference benchmark -[2023-09-04 08:50:32,524][inference][INFO] - + Tracking forward pass peak memory -[2023-09-04 08:50:32,568][inference][INFO] - + Forward pass peak memory: 469.303296 (MB) -[2023-09-04 08:50:32,569][inference][INFO] - + Warming up the forward pass -[2023-09-04 08:50:32,605][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-04 08:50:37,645][inference][INFO] - + Forward pass latency: 5.01e-03 (s) -[2023-09-04 08:50:37,647][inference][INFO] - + Forward pass throughput: 200.00 (samples/s) -[2023-09-04 08:50:37,648][inference][INFO] - + Warming up the generation pass -[2023-09-04 08:50:38,191][inference][INFO] - + Tracking generation latency and throughput -[2023-09-04 08:50:43,627][inference][INFO] - + Generation pass latency: 4.94e-01 (s) -[2023-09-04 08:50:43,628][inference][INFO] - + Generation pass throughput: 202.00 (tokens/s) -[2023-09-04 08:50:43,628][inference][INFO] - Saving inference results -[2023-09-04 08:50:43,644][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-04_09:35:39_b1d475f6d249d37cb3d1bf417eaf2b1f8cff2b34/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-04_09:35:39_b1d475f6d249d37cb3d1bf417eaf2b1f8cff2b34/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_09:35:39_b1d475f6d249d37cb3d1bf417eaf2b1f8cff2b34/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_09:35:39_b1d475f6d249d37cb3d1bf417eaf2b1f8cff2b34/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-04_09:35:39_b1d475f6d249d37cb3d1bf417eaf2b1f8cff2b34/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index b6baa7bc6110f8ce8dabfa0612d65a7b619143a4..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_09:35:39_b1d475f6d249d37cb3d1bf417eaf2b1f8cff2b34/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-04_09:35:39_b1d475f6d249d37cb3d1bf417eaf2b1f8cff2b34/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-04_09:35:39_b1d475f6d249d37cb3d1bf417eaf2b1f8cff2b34/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-04_09:35:39_b1d475f6d249d37cb3d1bf417eaf2b1f8cff2b34/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_09:35:39_b1d475f6d249d37cb3d1bf417eaf2b1f8cff2b34/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-04_09:35:39_b1d475f6d249d37cb3d1bf417eaf2b1f8cff2b34/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-04_09:35:39_b1d475f6d249d37cb3d1bf417eaf2b1f8cff2b34/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_09:35:39_b1d475f6d249d37cb3d1bf417eaf2b1f8cff2b34/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_09:35:39_b1d475f6d249d37cb3d1bf417eaf2b1f8cff2b34/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-04_09:35:39_b1d475f6d249d37cb3d1bf417eaf2b1f8cff2b34/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index b51e90477603be059588cf4812e94335ee92a17e..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_09:35:39_b1d475f6d249d37cb3d1bf417eaf2b1f8cff2b34/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,469.17632,0.0064,156.0 diff --git a/raw_results/2023-09-04_09:35:39_b1d475f6d249d37cb3d1bf417eaf2b1f8cff2b34/pytorch_bert_inference/0/main.log b/raw_results/2023-09-04_09:35:39_b1d475f6d249d37cb3d1bf417eaf2b1f8cff2b34/pytorch_bert_inference/0/main.log deleted file mode 100644 index 3f4e86b73c0e6e2a953691c72213124769770dc8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_09:35:39_b1d475f6d249d37cb3d1bf417eaf2b1f8cff2b34/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-04 10:50:05,711][benchmark][INFO] - Configuring inference benchmark -[2023-09-04 10:50:05,712][benchmark][INFO] - + Setting seed(42) -[2023-09-04 10:50:06,935][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-04 10:50:06,935][backend][INFO] - Configuring pytorch backend -[2023-09-04 10:50:06,935][backend][INFO] - + Checking initial device isolation -[2023-09-04 10:50:06,935][backend][INFO] - + Checking contineous device isolation -[2023-09-04 10:50:06,935][pytorch][INFO] - + Disabling gradients -[2023-09-04 10:50:06,936][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-04 10:50:07,580][pytorch][INFO] - + Turning on eval mode -[2023-09-04 10:50:07,580][inference][INFO] - Running inference benchmark -[2023-09-04 10:50:07,725][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 10:50:07,727][inference][INFO] - + Tracking forward pass peak 
memory -[2023-09-04 10:50:07,792][inference][INFO] - + Forward pass peak memory: 469.17632 (MB) -[2023-09-04 10:50:07,793][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 10:50:07,795][inference][INFO] - + Warming up the forward pass -[2023-09-04 10:50:07,855][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-04 10:50:12,877][inference][INFO] - + Forward pass latency: 6.40e-03 (s) -[2023-09-04 10:50:12,877][inference][INFO] - + Forward pass throughput: 156.00 (samples/s) -[2023-09-04 10:50:12,878][inference][INFO] - Saving inference results -[2023-09-04 10:50:12,889][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-04_09:35:39_b1d475f6d249d37cb3d1bf417eaf2b1f8cff2b34/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-04_09:35:39_b1d475f6d249d37cb3d1bf417eaf2b1f8cff2b34/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_09:35:39_b1d475f6d249d37cb3d1bf417eaf2b1f8cff2b34/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_09:35:39_b1d475f6d249d37cb3d1bf417eaf2b1f8cff2b34/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-04_09:35:39_b1d475f6d249d37cb3d1bf417eaf2b1f8cff2b34/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 8e172655811e267f98b0f08e4d6cb8a642d5b28d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_09:35:39_b1d475f6d249d37cb3d1bf417eaf2b1f8cff2b34/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-04_09:35:39_b1d475f6d249d37cb3d1bf417eaf2b1f8cff2b34/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-04_09:35:39_b1d475f6d249d37cb3d1bf417eaf2b1f8cff2b34/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-09-04_09:35:39_b1d475f6d249d37cb3d1bf417eaf2b1f8cff2b34/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_09:35:39_b1d475f6d249d37cb3d1bf417eaf2b1f8cff2b34/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-04_09:35:39_b1d475f6d249d37cb3d1bf417eaf2b1f8cff2b34/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-04_09:35:39_b1d475f6d249d37cb3d1bf417eaf2b1f8cff2b34/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_09:35:39_b1d475f6d249d37cb3d1bf417eaf2b1f8cff2b34/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_09:35:39_b1d475f6d249d37cb3d1bf417eaf2b1f8cff2b34/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-04_09:35:39_b1d475f6d249d37cb3d1bf417eaf2b1f8cff2b34/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 0c30ecb8b94e808c3b8f29763b033ae88b6c6653..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_09:35:39_b1d475f6d249d37cb3d1bf417eaf2b1f8cff2b34/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,470.18803199999996,0.0051,784.0 diff --git a/raw_results/2023-09-04_09:35:39_b1d475f6d249d37cb3d1bf417eaf2b1f8cff2b34/pytorch_bert_inference/1/main.log b/raw_results/2023-09-04_09:35:39_b1d475f6d249d37cb3d1bf417eaf2b1f8cff2b34/pytorch_bert_inference/1/main.log deleted file mode 100644 index 8417e78bd630610c1361a30c945e6dbe621c7c68..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-09-04_09:35:39_b1d475f6d249d37cb3d1bf417eaf2b1f8cff2b34/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-04 10:50:13,259][benchmark][INFO] - Configuring inference benchmark -[2023-09-04 10:50:13,260][benchmark][INFO] - + Setting seed(42) -[2023-09-04 10:50:13,703][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-04 10:50:13,704][backend][INFO] - Configuring pytorch backend -[2023-09-04 10:50:13,704][backend][INFO] - + Checking initial device isolation -[2023-09-04 10:50:13,704][backend][INFO] - + Checking contineous device isolation -[2023-09-04 10:50:13,704][pytorch][INFO] - + Disabling gradients -[2023-09-04 10:50:13,704][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-04 10:50:13,816][pytorch][INFO] - + Turning on eval mode -[2023-09-04 10:50:13,816][inference][INFO] - Running inference benchmark -[2023-09-04 10:50:13,938][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 10:50:13,939][inference][INFO] - + Tracking forward pass peak memory -[2023-09-04 10:50:13,987][inference][INFO] - + Forward pass peak memory: 470.18803199999996 (MB) -[2023-09-04 10:50:13,987][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 10:50:13,989][inference][INFO] - + Warming up the forward pass -[2023-09-04 10:50:14,042][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-04 10:50:19,071][inference][INFO] - + Forward pass latency: 5.10e-03 (s) -[2023-09-04 10:50:19,072][inference][INFO] - + Forward pass throughput: 784.00 (samples/s) -[2023-09-04 10:50:19,072][inference][INFO] - Saving inference results -[2023-09-04 10:50:19,080][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-04_09:35:39_b1d475f6d249d37cb3d1bf417eaf2b1f8cff2b34/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-04_09:35:39_b1d475f6d249d37cb3d1bf417eaf2b1f8cff2b34/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_09:35:39_b1d475f6d249d37cb3d1bf417eaf2b1f8cff2b34/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference 
-model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_09:35:39_b1d475f6d249d37cb3d1bf417eaf2b1f8cff2b34/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-04_09:35:39_b1d475f6d249d37cb3d1bf417eaf2b1f8cff2b34/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index a60907ba529d80f10e508abcc2521228d0d714b6..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_09:35:39_b1d475f6d249d37cb3d1bf417eaf2b1f8cff2b34/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-04_09:35:39_b1d475f6d249d37cb3d1bf417eaf2b1f8cff2b34/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-04_09:35:39_b1d475f6d249d37cb3d1bf417eaf2b1f8cff2b34/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-09-04_09:35:39_b1d475f6d249d37cb3d1bf417eaf2b1f8cff2b34/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_09:35:39_b1d475f6d249d37cb3d1bf417eaf2b1f8cff2b34/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-04_09:35:39_b1d475f6d249d37cb3d1bf417eaf2b1f8cff2b34/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-04_09:35:39_b1d475f6d249d37cb3d1bf417eaf2b1f8cff2b34/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_09:35:39_b1d475f6d249d37cb3d1bf417eaf2b1f8cff2b34/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_09:35:39_b1d475f6d249d37cb3d1bf417eaf2b1f8cff2b34/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-04_09:35:39_b1d475f6d249d37cb3d1bf417eaf2b1f8cff2b34/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index f9950f86154d3d0396d2e4df7c58ec971821d2e0..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_09:35:39_b1d475f6d249d37cb3d1bf417eaf2b1f8cff2b34/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.487616,0.00373,268.0,0.523,191.0 diff --git a/raw_results/2023-09-04_09:35:39_b1d475f6d249d37cb3d1bf417eaf2b1f8cff2b34/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-04_09:35:39_b1d475f6d249d37cb3d1bf417eaf2b1f8cff2b34/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 9555614d74e582300047cc996949a6584ee6850c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_09:35:39_b1d475f6d249d37cb3d1bf417eaf2b1f8cff2b34/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-04 10:50:23,862][benchmark][INFO] - Configuring inference benchmark -[2023-09-04 10:50:23,864][benchmark][INFO] - + Setting seed(42) -[2023-09-04 10:50:25,566][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-04 10:50:25,567][backend][INFO] - Configuring pytorch backend -[2023-09-04 10:50:25,567][backend][INFO] - + Checking initial device isolation -[2023-09-04 10:50:25,567][backend][INFO] - + Checking contineous device isolation -[2023-09-04 10:50:25,567][pytorch][INFO] - + Disabling gradients -[2023-09-04 10:50:25,567][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-04 10:50:26,218][pytorch][INFO] - + Turning on eval mode -[2023-09-04 10:50:26,219][inference][INFO] - Running inference benchmark -[2023-09-04 10:50:26,746][inference][INFO] - + Tracking forward pass peak memory -[2023-09-04 10:50:26,800][inference][INFO] - + Forward pass peak memory: 469.487616 (MB) -[2023-09-04 10:50:26,801][inference][INFO] - + Warming up the forward pass 
-[2023-09-04 10:50:26,834][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-04 10:50:31,881][inference][INFO] - + Forward pass latency: 3.73e-03 (s) -[2023-09-04 10:50:31,883][inference][INFO] - + Forward pass throughput: 268.00 (samples/s) -[2023-09-04 10:50:31,884][inference][INFO] - + Warming up the generation pass -[2023-09-04 10:50:32,476][inference][INFO] - + Tracking generation latency and throughput -[2023-09-04 10:50:37,709][inference][INFO] - + Generation pass latency: 5.23e-01 (s) -[2023-09-04 10:50:37,710][inference][INFO] - + Generation pass throughput: 191.00 (tokens/s) -[2023-09-04 10:50:37,710][inference][INFO] - Saving inference results -[2023-09-04 10:50:37,722][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-04_10:15:12_0f0e1a2c2bff68541a5b9770d78e0fb6feb7de72/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-04_10:15:12_0f0e1a2c2bff68541a5b9770d78e0fb6feb7de72/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:15:12_0f0e1a2c2bff68541a5b9770d78e0fb6feb7de72/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_10:15:12_0f0e1a2c2bff68541a5b9770d78e0fb6feb7de72/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-04_10:15:12_0f0e1a2c2bff68541a5b9770d78e0fb6feb7de72/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 2de6328fafc8c0d94f0178d4ff631cea340a7a33..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:15:12_0f0e1a2c2bff68541a5b9770d78e0fb6feb7de72/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-04_10:15:12_0f0e1a2c2bff68541a5b9770d78e0fb6feb7de72/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-09-04_10:15:12_0f0e1a2c2bff68541a5b9770d78e0fb6feb7de72/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-04_10:15:12_0f0e1a2c2bff68541a5b9770d78e0fb6feb7de72/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:15:12_0f0e1a2c2bff68541a5b9770d78e0fb6feb7de72/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-04_10:15:12_0f0e1a2c2bff68541a5b9770d78e0fb6feb7de72/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-04_10:15:12_0f0e1a2c2bff68541a5b9770d78e0fb6feb7de72/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:15:12_0f0e1a2c2bff68541a5b9770d78e0fb6feb7de72/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_10:15:12_0f0e1a2c2bff68541a5b9770d78e0fb6feb7de72/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-04_10:15:12_0f0e1a2c2bff68541a5b9770d78e0fb6feb7de72/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 6224bca494c880837b35f29cd825ae71072f0d68..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:15:12_0f0e1a2c2bff68541a5b9770d78e0fb6feb7de72/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.856832,0.00692,145.0 diff --git a/raw_results/2023-09-04_10:15:12_0f0e1a2c2bff68541a5b9770d78e0fb6feb7de72/pytorch_bert_inference/0/main.log b/raw_results/2023-09-04_10:15:12_0f0e1a2c2bff68541a5b9770d78e0fb6feb7de72/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
065e346872e6404b52b6df3cc7f4e673fbafc9f0..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:15:12_0f0e1a2c2bff68541a5b9770d78e0fb6feb7de72/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-04 10:51:47,722][benchmark][INFO] - Configuring inference benchmark -[2023-09-04 10:51:47,723][benchmark][INFO] - + Setting seed(42) -[2023-09-04 10:51:49,133][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-04 10:51:49,133][backend][INFO] - Configuring pytorch backend -[2023-09-04 10:51:49,133][backend][INFO] - + Checking initial device isolation -[2023-09-04 10:51:49,133][backend][INFO] - + Checking contineous device isolation -[2023-09-04 10:51:49,134][pytorch][INFO] - + Disabling gradients -[2023-09-04 10:51:49,134][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-04 10:51:49,756][pytorch][INFO] - + Turning on eval mode -[2023-09-04 10:51:49,757][inference][INFO] - Running inference benchmark -[2023-09-04 10:51:49,877][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 10:51:49,878][inference][INFO] - + Tracking forward pass peak memory -[2023-09-04 10:51:49,946][inference][INFO] - + Forward pass peak memory: 468.856832 (MB) -[2023-09-04 10:51:49,947][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 10:51:49,949][inference][INFO] - + Warming up the forward pass -[2023-09-04 10:51:50,023][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-04 10:51:55,046][inference][INFO] - + Forward pass latency: 6.92e-03 (s) -[2023-09-04 10:51:55,048][inference][INFO] - + Forward pass throughput: 145.00 (samples/s) -[2023-09-04 10:51:55,048][inference][INFO] - Saving inference results -[2023-09-04 10:51:55,080][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-04_10:15:12_0f0e1a2c2bff68541a5b9770d78e0fb6feb7de72/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-04_10:15:12_0f0e1a2c2bff68541a5b9770d78e0fb6feb7de72/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:15:12_0f0e1a2c2bff68541a5b9770d78e0fb6feb7de72/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_10:15:12_0f0e1a2c2bff68541a5b9770d78e0fb6feb7de72/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-04_10:15:12_0f0e1a2c2bff68541a5b9770d78e0fb6feb7de72/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 5c3498457fc091efc0ffea509db34843fcd40d88..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:15:12_0f0e1a2c2bff68541a5b9770d78e0fb6feb7de72/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-04_10:15:12_0f0e1a2c2bff68541a5b9770d78e0fb6feb7de72/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-04_10:15:12_0f0e1a2c2bff68541a5b9770d78e0fb6feb7de72/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-09-04_10:15:12_0f0e1a2c2bff68541a5b9770d78e0fb6feb7de72/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:15:12_0f0e1a2c2bff68541a5b9770d78e0fb6feb7de72/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-04_10:15:12_0f0e1a2c2bff68541a5b9770d78e0fb6feb7de72/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-04_10:15:12_0f0e1a2c2bff68541a5b9770d78e0fb6feb7de72/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:15:12_0f0e1a2c2bff68541a5b9770d78e0fb6feb7de72/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_10:15:12_0f0e1a2c2bff68541a5b9770d78e0fb6feb7de72/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-04_10:15:12_0f0e1a2c2bff68541a5b9770d78e0fb6feb7de72/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 048ee17c1e5334e9513250dfdd9f07ded608a0ce..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:15:12_0f0e1a2c2bff68541a5b9770d78e0fb6feb7de72/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,469.87264,0.00634,631.0 diff --git a/raw_results/2023-09-04_10:15:12_0f0e1a2c2bff68541a5b9770d78e0fb6feb7de72/pytorch_bert_inference/1/main.log b/raw_results/2023-09-04_10:15:12_0f0e1a2c2bff68541a5b9770d78e0fb6feb7de72/pytorch_bert_inference/1/main.log deleted file mode 100644 index 31dfee5fdc18a044735abf14a4e7ffae5e5046d1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:15:12_0f0e1a2c2bff68541a5b9770d78e0fb6feb7de72/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-04 10:51:55,458][benchmark][INFO] - Configuring inference benchmark -[2023-09-04 10:51:55,459][benchmark][INFO] - + Setting seed(42) -[2023-09-04 10:51:55,905][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-04 10:51:55,905][backend][INFO] - Configuring pytorch backend -[2023-09-04 10:51:55,905][backend][INFO] - + Checking initial device isolation -[2023-09-04 10:51:55,905][backend][INFO] - + Checking contineous device isolation -[2023-09-04 10:51:55,905][pytorch][INFO] - + Disabling gradients -[2023-09-04 10:51:55,906][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-04 10:51:56,024][pytorch][INFO] - + Turning on eval mode -[2023-09-04 10:51:56,025][inference][INFO] - Running inference benchmark -[2023-09-04 10:51:56,148][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 10:51:56,150][inference][INFO] - + Tracking forward pass peak 
memory -[2023-09-04 10:51:56,199][inference][INFO] - + Forward pass peak memory: 469.87264 (MB) -[2023-09-04 10:51:56,200][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 10:51:56,201][inference][INFO] - + Warming up the forward pass -[2023-09-04 10:51:56,265][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-04 10:52:01,291][inference][INFO] - + Forward pass latency: 6.34e-03 (s) -[2023-09-04 10:52:01,292][inference][INFO] - + Forward pass throughput: 631.00 (samples/s) -[2023-09-04 10:52:01,292][inference][INFO] - Saving inference results -[2023-09-04 10:52:01,299][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-04_10:15:12_0f0e1a2c2bff68541a5b9770d78e0fb6feb7de72/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-04_10:15:12_0f0e1a2c2bff68541a5b9770d78e0fb6feb7de72/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:15:12_0f0e1a2c2bff68541a5b9770d78e0fb6feb7de72/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_10:15:12_0f0e1a2c2bff68541a5b9770d78e0fb6feb7de72/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-04_10:15:12_0f0e1a2c2bff68541a5b9770d78e0fb6feb7de72/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index e791f67b798f73be072e5c309d9a815f833ae883..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:15:12_0f0e1a2c2bff68541a5b9770d78e0fb6feb7de72/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-04_10:15:12_0f0e1a2c2bff68541a5b9770d78e0fb6feb7de72/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-04_10:15:12_0f0e1a2c2bff68541a5b9770d78e0fb6feb7de72/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-09-04_10:15:12_0f0e1a2c2bff68541a5b9770d78e0fb6feb7de72/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:15:12_0f0e1a2c2bff68541a5b9770d78e0fb6feb7de72/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-04_10:15:12_0f0e1a2c2bff68541a5b9770d78e0fb6feb7de72/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-04_10:15:12_0f0e1a2c2bff68541a5b9770d78e0fb6feb7de72/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:15:12_0f0e1a2c2bff68541a5b9770d78e0fb6feb7de72/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_10:15:12_0f0e1a2c2bff68541a5b9770d78e0fb6feb7de72/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-04_10:15:12_0f0e1a2c2bff68541a5b9770d78e0fb6feb7de72/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index eb9921d85190b87aa24bf301715f16f7c59f87dc..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:15:12_0f0e1a2c2bff68541a5b9770d78e0fb6feb7de72/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.64736,0.00376,266.0,0.509,196.0 diff --git a/raw_results/2023-09-04_10:15:12_0f0e1a2c2bff68541a5b9770d78e0fb6feb7de72/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-04_10:15:12_0f0e1a2c2bff68541a5b9770d78e0fb6feb7de72/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index aa3476b4b98d515c1ea544adaec9b9382b96fdf9..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-09-04_10:15:12_0f0e1a2c2bff68541a5b9770d78e0fb6feb7de72/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-04 10:52:06,078][benchmark][INFO] - Configuring inference benchmark -[2023-09-04 10:52:06,080][benchmark][INFO] - + Setting seed(42) -[2023-09-04 10:52:07,774][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-04 10:52:07,775][backend][INFO] - Configuring pytorch backend -[2023-09-04 10:52:07,775][backend][INFO] - + Checking initial device isolation -[2023-09-04 10:52:07,775][backend][INFO] - + Checking contineous device isolation -[2023-09-04 10:52:07,776][pytorch][INFO] - + Disabling gradients -[2023-09-04 10:52:07,776][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-04 10:52:08,413][pytorch][INFO] - + Turning on eval mode -[2023-09-04 10:52:08,414][inference][INFO] - Running inference benchmark -[2023-09-04 10:52:08,618][inference][INFO] - + Tracking forward pass peak memory -[2023-09-04 10:52:08,666][inference][INFO] - + Forward pass peak memory: 469.64736 (MB) -[2023-09-04 10:52:08,668][inference][INFO] - + Warming up the forward pass -[2023-09-04 10:52:08,699][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-04 10:52:13,746][inference][INFO] - + Forward pass latency: 3.76e-03 (s) -[2023-09-04 10:52:13,747][inference][INFO] - + Forward pass throughput: 266.00 (samples/s) -[2023-09-04 10:52:13,748][inference][INFO] - + Warming up the generation pass -[2023-09-04 10:52:14,341][inference][INFO] - + Tracking generation latency and throughput -[2023-09-04 10:52:19,430][inference][INFO] - + Generation pass latency: 5.09e-01 (s) -[2023-09-04 10:52:19,431][inference][INFO] - + Generation pass throughput: 196.00 (tokens/s) -[2023-09-04 10:52:19,432][inference][INFO] - Saving inference results -[2023-09-04 10:52:19,444][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-04_10:16:34_51e1e8120bc569c3f60f7c73ff6e38a90e6229f7/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-04_10:16:34_51e1e8120bc569c3f60f7c73ff6e38a90e6229f7/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:16:34_51e1e8120bc569c3f60f7c73ff6e38a90e6229f7/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 
16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_10:16:34_51e1e8120bc569c3f60f7c73ff6e38a90e6229f7/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-04_10:16:34_51e1e8120bc569c3f60f7c73ff6e38a90e6229f7/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index e98fca969127e4306fbbe87f1eb661fa16a9a599..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:16:34_51e1e8120bc569c3f60f7c73ff6e38a90e6229f7/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
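A detail worth flagging in the YAML hunks above: the composed .config/config.yaml files store disable_grad and eval_mode as the unresolved interpolation ${is_inference:${benchmark.name}}, while the hydra_config.yaml written next to each run's results stores the resolved literal true. Below is a minimal OmegaConf sketch of that resolution step; the resolver body is an assumption inferred from its name (the real one ships with optimum_benchmark), and only the interpolation syntax is taken from the files above.

from omegaconf import OmegaConf

# Assumed re-implementation of the "is_inference" resolver: the real one is
# registered by optimum_benchmark; this body is inferred from the name only.
OmegaConf.register_new_resolver("is_inference", lambda name: name == "inference")

cfg = OmegaConf.create(
    """
    benchmark:
      name: inference
    backend:
      disable_grad: ${is_inference:${benchmark.name}}
      eval_mode: ${is_inference:${benchmark.name}}
    """
)

print(cfg.backend.disable_grad)              # True -- resolved on access
print(OmegaConf.to_yaml(cfg, resolve=True))  # the form hydra_config.yaml records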
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-04_10:16:34_51e1e8120bc569c3f60f7c73ff6e38a90e6229f7/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-04_10:16:34_51e1e8120bc569c3f60f7c73ff6e38a90e6229f7/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-04_10:16:34_51e1e8120bc569c3f60f7c73ff6e38a90e6229f7/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:16:34_51e1e8120bc569c3f60f7c73ff6e38a90e6229f7/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-04_10:16:34_51e1e8120bc569c3f60f7c73ff6e38a90e6229f7/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-04_10:16:34_51e1e8120bc569c3f60f7c73ff6e38a90e6229f7/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:16:34_51e1e8120bc569c3f60f7c73ff6e38a90e6229f7/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_10:16:34_51e1e8120bc569c3f60f7c73ff6e38a90e6229f7/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-04_10:16:34_51e1e8120bc569c3f60f7c73ff6e38a90e6229f7/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 8187fb3be412a68fe1978026ac6497766cb4bf16..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:16:34_51e1e8120bc569c3f60f7c73ff6e38a90e6229f7/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.168704,0.00706,142.0 diff --git a/raw_results/2023-09-04_10:16:34_51e1e8120bc569c3f60f7c73ff6e38a90e6229f7/pytorch_bert_inference/0/main.log b/raw_results/2023-09-04_10:16:34_51e1e8120bc569c3f60f7c73ff6e38a90e6229f7/pytorch_bert_inference/0/main.log deleted file mode 100644 index ae64ad3b4ddbef3b487cc73eee95d5687420c5b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:16:34_51e1e8120bc569c3f60f7c73ff6e38a90e6229f7/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-04 10:53:28,103][benchmark][INFO] - Configuring inference benchmark -[2023-09-04 10:53:28,104][benchmark][INFO] - + Setting seed(42) -[2023-09-04 10:53:29,322][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-04 10:53:29,323][backend][INFO] - Configuring pytorch backend -[2023-09-04 10:53:29,323][backend][INFO] - + Checking initial device isolation -[2023-09-04 10:53:29,323][backend][INFO] - + Checking contineous device isolation -[2023-09-04 10:53:29,323][pytorch][INFO] - + Disabling gradients -[2023-09-04 10:53:29,323][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-04 10:53:29,949][pytorch][INFO] - + Turning on eval mode -[2023-09-04 10:53:29,950][inference][INFO] - Running inference benchmark -[2023-09-04 10:53:30,071][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 10:53:30,072][inference][INFO] - + Tracking forward pass peak 
memory -[2023-09-04 10:53:30,140][inference][INFO] - + Forward pass peak memory: 468.168704 (MB) -[2023-09-04 10:53:30,142][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 10:53:30,143][inference][INFO] - + Warming up the forward pass -[2023-09-04 10:53:30,218][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-04 10:53:35,242][inference][INFO] - + Forward pass latency: 7.06e-03 (s) -[2023-09-04 10:53:35,243][inference][INFO] - + Forward pass throughput: 142.00 (samples/s) -[2023-09-04 10:53:35,243][inference][INFO] - Saving inference results -[2023-09-04 10:53:35,252][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-04_10:16:34_51e1e8120bc569c3f60f7c73ff6e38a90e6229f7/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-04_10:16:34_51e1e8120bc569c3f60f7c73ff6e38a90e6229f7/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:16:34_51e1e8120bc569c3f60f7c73ff6e38a90e6229f7/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_10:16:34_51e1e8120bc569c3f60f7c73ff6e38a90e6229f7/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-04_10:16:34_51e1e8120bc569c3f60f7c73ff6e38a90e6229f7/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index c0e59f2307cf7d3ff3ce67c1ab83984f2a2ed55b..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:16:34_51e1e8120bc569c3f60f7c73ff6e38a90e6229f7/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-04_10:16:34_51e1e8120bc569c3f60f7c73ff6e38a90e6229f7/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-04_10:16:34_51e1e8120bc569c3f60f7c73ff6e38a90e6229f7/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-09-04_10:16:34_51e1e8120bc569c3f60f7c73ff6e38a90e6229f7/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:16:34_51e1e8120bc569c3f60f7c73ff6e38a90e6229f7/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-04_10:16:34_51e1e8120bc569c3f60f7c73ff6e38a90e6229f7/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-04_10:16:34_51e1e8120bc569c3f60f7c73ff6e38a90e6229f7/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:16:34_51e1e8120bc569c3f60f7c73ff6e38a90e6229f7/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_10:16:34_51e1e8120bc569c3f60f7c73ff6e38a90e6229f7/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-04_10:16:34_51e1e8120bc569c3f60f7c73ff6e38a90e6229f7/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 1f36c48d28128109ed5834a16dc4a4033fbd2a17..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:16:34_51e1e8120bc569c3f60f7c73ff6e38a90e6229f7/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,469.16403199999996,0.00648,617.0 diff --git a/raw_results/2023-09-04_10:16:34_51e1e8120bc569c3f60f7c73ff6e38a90e6229f7/pytorch_bert_inference/1/main.log b/raw_results/2023-09-04_10:16:34_51e1e8120bc569c3f60f7c73ff6e38a90e6229f7/pytorch_bert_inference/1/main.log deleted file mode 100644 index 725fc65dd07525cf4b860d6820925be8c9e2315e..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-09-04_10:16:34_51e1e8120bc569c3f60f7c73ff6e38a90e6229f7/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-04 10:53:35,625][benchmark][INFO] - Configuring inference benchmark -[2023-09-04 10:53:35,626][benchmark][INFO] - + Setting seed(42) -[2023-09-04 10:53:36,055][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-04 10:53:36,056][backend][INFO] - Configuring pytorch backend -[2023-09-04 10:53:36,056][backend][INFO] - + Checking initial device isolation -[2023-09-04 10:53:36,056][backend][INFO] - + Checking contineous device isolation -[2023-09-04 10:53:36,056][pytorch][INFO] - + Disabling gradients -[2023-09-04 10:53:36,056][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-04 10:53:36,179][pytorch][INFO] - + Turning on eval mode -[2023-09-04 10:53:36,179][inference][INFO] - Running inference benchmark -[2023-09-04 10:53:36,311][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 10:53:36,313][inference][INFO] - + Tracking forward pass peak memory -[2023-09-04 10:53:36,363][inference][INFO] - + Forward pass peak memory: 469.16403199999996 (MB) -[2023-09-04 10:53:36,365][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 10:53:36,367][inference][INFO] - + Warming up the forward pass -[2023-09-04 10:53:36,419][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-04 10:53:41,444][inference][INFO] - + Forward pass latency: 6.48e-03 (s) -[2023-09-04 10:53:41,446][inference][INFO] - + Forward pass throughput: 617.00 (samples/s) -[2023-09-04 10:53:41,446][inference][INFO] - Saving inference results -[2023-09-04 10:53:41,454][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-04_10:16:34_51e1e8120bc569c3f60f7c73ff6e38a90e6229f7/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-04_10:16:34_51e1e8120bc569c3f60f7c73ff6e38a90e6229f7/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:16:34_51e1e8120bc569c3f60f7c73ff6e38a90e6229f7/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference 
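The latency and throughput pairs recorded in the inference_results.csv hunks and main.log excerpts are mutually consistent: samples/s equals batch size divided by forward latency, and tokens/s equals batch size times new_tokens divided by generation latency. A quick arithmetic check against the numbers above (the helper names are illustrative, not optimum-benchmark API):

def forward_throughput(batch_size: int, latency_s: float) -> float:
    # samples processed per second by one forward pass
    return batch_size / latency_s

def generate_throughput(batch_size: int, new_tokens: int, latency_s: float) -> float:
    # tokens produced per second by one generation pass
    return batch_size * new_tokens / latency_s

# bert, batch_size=4: 4 / 6.48e-03 s -> the 617 samples/s recorded above
assert round(forward_throughput(4, 0.00648)) == 617
# gpt2, batch_size=1, new_tokens=100: 100 / 5.09e-01 s -> the 196 tokens/s above
assert round(generate_throughput(1, 100, 0.509)) == 196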
-model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_10:16:34_51e1e8120bc569c3f60f7c73ff6e38a90e6229f7/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-04_10:16:34_51e1e8120bc569c3f60f7c73ff6e38a90e6229f7/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 9d39f710710d0338688ef719ce3ac0642f5f6749..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:16:34_51e1e8120bc569c3f60f7c73ff6e38a90e6229f7/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-04_10:16:34_51e1e8120bc569c3f60f7c73ff6e38a90e6229f7/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-04_10:16:34_51e1e8120bc569c3f60f7c73ff6e38a90e6229f7/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-09-04_10:16:34_51e1e8120bc569c3f60f7c73ff6e38a90e6229f7/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:16:34_51e1e8120bc569c3f60f7c73ff6e38a90e6229f7/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-04_10:16:34_51e1e8120bc569c3f60f7c73ff6e38a90e6229f7/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-04_10:16:34_51e1e8120bc569c3f60f7c73ff6e38a90e6229f7/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:16:34_51e1e8120bc569c3f60f7c73ff6e38a90e6229f7/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_10:16:34_51e1e8120bc569c3f60f7c73ff6e38a90e6229f7/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-04_10:16:34_51e1e8120bc569c3f60f7c73ff6e38a90e6229f7/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 7bd2190db9001c8156100b99486b5701c7aec0c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:16:34_51e1e8120bc569c3f60f7c73ff6e38a90e6229f7/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.33196799999996,0.00381,262.0,0.541,185.0 diff --git a/raw_results/2023-09-04_10:16:34_51e1e8120bc569c3f60f7c73ff6e38a90e6229f7/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-04_10:16:34_51e1e8120bc569c3f60f7c73ff6e38a90e6229f7/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index d63f35d84ec8279740841e6481381a1f77d02070..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:16:34_51e1e8120bc569c3f60f7c73ff6e38a90e6229f7/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-04 10:53:46,227][benchmark][INFO] - Configuring inference benchmark -[2023-09-04 10:53:46,229][benchmark][INFO] - + Setting seed(42) -[2023-09-04 10:53:47,653][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-04 10:53:47,653][backend][INFO] - Configuring pytorch backend -[2023-09-04 10:53:47,654][backend][INFO] - + Checking initial device isolation -[2023-09-04 10:53:47,654][backend][INFO] - + Checking contineous device isolation -[2023-09-04 10:53:47,654][pytorch][INFO] - + Disabling gradients -[2023-09-04 10:53:47,654][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-04 10:53:48,301][pytorch][INFO] - + Turning on eval mode -[2023-09-04 10:53:48,301][inference][INFO] - Running inference benchmark -[2023-09-04 10:53:48,488][inference][INFO] - + Tracking forward pass peak memory -[2023-09-04 10:53:48,536][inference][INFO] - + Forward pass peak memory: 469.33196799999996 (MB) -[2023-09-04 10:53:48,537][inference][INFO] - + Warming up the 
forward pass -[2023-09-04 10:53:48,573][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-04 10:53:53,619][inference][INFO] - + Forward pass latency: 3.81e-03 (s) -[2023-09-04 10:53:53,620][inference][INFO] - + Forward pass throughput: 262.00 (samples/s) -[2023-09-04 10:53:53,621][inference][INFO] - + Warming up the generation pass -[2023-09-04 10:53:54,214][inference][INFO] - + Tracking generation latency and throughput -[2023-09-04 10:53:59,629][inference][INFO] - + Generation pass latency: 5.41e-01 (s) -[2023-09-04 10:53:59,631][inference][INFO] - + Generation pass throughput: 185.00 (tokens/s) -[2023-09-04 10:53:59,631][inference][INFO] - Saving inference results -[2023-09-04 10:53:59,641][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-04_10:16:49_d4407a3bd13b8ec3978b9ba8e4e45cb11f230437/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-04_10:16:49_d4407a3bd13b8ec3978b9ba8e4e45cb11f230437/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:16:49_d4407a3bd13b8ec3978b9ba8e4e45cb11f230437/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_10:16:49_d4407a3bd13b8ec3978b9ba8e4e45cb11f230437/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-04_10:16:49_d4407a3bd13b8ec3978b9ba8e4e45cb11f230437/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 524aae7307065adcfaa360ff4ab687e3963ea3af..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:16:49_d4407a3bd13b8ec3978b9ba8e4e45cb11f230437/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-04_10:16:49_d4407a3bd13b8ec3978b9ba8e4e45cb11f230437/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-09-04_10:16:49_d4407a3bd13b8ec3978b9ba8e4e45cb11f230437/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-04_10:16:49_d4407a3bd13b8ec3978b9ba8e4e45cb11f230437/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:16:49_d4407a3bd13b8ec3978b9ba8e4e45cb11f230437/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-04_10:16:49_d4407a3bd13b8ec3978b9ba8e4e45cb11f230437/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-04_10:16:49_d4407a3bd13b8ec3978b9ba8e4e45cb11f230437/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:16:49_d4407a3bd13b8ec3978b9ba8e4e45cb11f230437/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_10:16:49_d4407a3bd13b8ec3978b9ba8e4e45cb11f230437/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-04_10:16:49_d4407a3bd13b8ec3978b9ba8e4e45cb11f230437/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index b516aa018b5e4b72d4fb652c7c54b54d8fd2fd93..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:16:49_d4407a3bd13b8ec3978b9ba8e4e45cb11f230437/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.979712,0.006,167.0 diff --git a/raw_results/2023-09-04_10:16:49_d4407a3bd13b8ec3978b9ba8e4e45cb11f230437/pytorch_bert_inference/0/main.log b/raw_results/2023-09-04_10:16:49_d4407a3bd13b8ec3978b9ba8e4e45cb11f230437/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
5f85802b5ea687d8ab9029208f84912bbb63be89..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:16:49_d4407a3bd13b8ec3978b9ba8e4e45cb11f230437/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-04 10:55:08,474][benchmark][INFO] - Configuring inference benchmark -[2023-09-04 10:55:08,475][benchmark][INFO] - + Setting seed(42) -[2023-09-04 10:55:09,967][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-04 10:55:09,968][backend][INFO] - Configuring pytorch backend -[2023-09-04 10:55:09,968][backend][INFO] - + Checking initial device isolation -[2023-09-04 10:55:09,968][backend][INFO] - + Checking contineous device isolation -[2023-09-04 10:55:09,968][pytorch][INFO] - + Disabling gradients -[2023-09-04 10:55:09,968][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-04 10:55:10,585][pytorch][INFO] - + Turning on eval mode -[2023-09-04 10:55:10,586][inference][INFO] - Running inference benchmark -[2023-09-04 10:55:10,710][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 10:55:10,712][inference][INFO] - + Tracking forward pass peak memory -[2023-09-04 10:55:10,782][inference][INFO] - + Forward pass peak memory: 468.979712 (MB) -[2023-09-04 10:55:10,783][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 10:55:10,785][inference][INFO] - + Warming up the forward pass -[2023-09-04 10:55:10,861][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-04 10:55:15,882][inference][INFO] - + Forward pass latency: 6.00e-03 (s) -[2023-09-04 10:55:15,883][inference][INFO] - + Forward pass throughput: 167.00 (samples/s) -[2023-09-04 10:55:15,883][inference][INFO] - Saving inference results -[2023-09-04 10:55:15,892][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-04_10:16:49_d4407a3bd13b8ec3978b9ba8e4e45cb11f230437/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-04_10:16:49_d4407a3bd13b8ec3978b9ba8e4e45cb11f230437/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:16:49_d4407a3bd13b8ec3978b9ba8e4e45cb11f230437/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_10:16:49_d4407a3bd13b8ec3978b9ba8e4e45cb11f230437/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-04_10:16:49_d4407a3bd13b8ec3978b9ba8e4e45cb11f230437/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index b2c0eacc3b7f6e6db9ef7e92d37fc7fb46282aec..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:16:49_d4407a3bd13b8ec3978b9ba8e4e45cb11f230437/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-04_10:16:49_d4407a3bd13b8ec3978b9ba8e4e45cb11f230437/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-04_10:16:49_d4407a3bd13b8ec3978b9ba8e4e45cb11f230437/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-09-04_10:16:49_d4407a3bd13b8ec3978b9ba8e4e45cb11f230437/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:16:49_d4407a3bd13b8ec3978b9ba8e4e45cb11f230437/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-04_10:16:49_d4407a3bd13b8ec3978b9ba8e4e45cb11f230437/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-04_10:16:49_d4407a3bd13b8ec3978b9ba8e4e45cb11f230437/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:16:49_d4407a3bd13b8ec3978b9ba8e4e45cb11f230437/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_10:16:49_d4407a3bd13b8ec3978b9ba8e4e45cb11f230437/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-04_10:16:49_d4407a3bd13b8ec3978b9ba8e4e45cb11f230437/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 12bec3ab13866de8f76db536f9099768bf97d56a..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:16:49_d4407a3bd13b8ec3978b9ba8e4e45cb11f230437/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,469.983232,0.00504,794.0 diff --git a/raw_results/2023-09-04_10:16:49_d4407a3bd13b8ec3978b9ba8e4e45cb11f230437/pytorch_bert_inference/1/main.log b/raw_results/2023-09-04_10:16:49_d4407a3bd13b8ec3978b9ba8e4e45cb11f230437/pytorch_bert_inference/1/main.log deleted file mode 100644 index 2992487cf8919c5829c9de95e370a2bc1e43b031..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:16:49_d4407a3bd13b8ec3978b9ba8e4e45cb11f230437/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-04 10:55:16,265][benchmark][INFO] - Configuring inference benchmark -[2023-09-04 10:55:16,266][benchmark][INFO] - + Setting seed(42) -[2023-09-04 10:55:16,711][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-04 10:55:16,711][backend][INFO] - Configuring pytorch backend -[2023-09-04 10:55:16,711][backend][INFO] - + Checking initial device isolation -[2023-09-04 10:55:16,711][backend][INFO] - + Checking contineous device isolation -[2023-09-04 10:55:16,711][pytorch][INFO] - + Disabling gradients -[2023-09-04 10:55:16,711][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-04 10:55:16,823][pytorch][INFO] - + Turning on eval mode -[2023-09-04 10:55:16,824][inference][INFO] - Running inference benchmark -[2023-09-04 10:55:16,943][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 10:55:16,944][inference][INFO] - + Tracking forward pass peak 
memory -[2023-09-04 10:55:16,988][inference][INFO] - + Forward pass peak memory: 469.983232 (MB) -[2023-09-04 10:55:16,989][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 10:55:16,991][inference][INFO] - + Warming up the forward pass -[2023-09-04 10:55:17,041][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-04 10:55:22,072][inference][INFO] - + Forward pass latency: 5.04e-03 (s) -[2023-09-04 10:55:22,073][inference][INFO] - + Forward pass throughput: 794.00 (samples/s) -[2023-09-04 10:55:22,073][inference][INFO] - Saving inference results -[2023-09-04 10:55:22,080][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-04_10:16:49_d4407a3bd13b8ec3978b9ba8e4e45cb11f230437/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-04_10:16:49_d4407a3bd13b8ec3978b9ba8e4e45cb11f230437/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:16:49_d4407a3bd13b8ec3978b9ba8e4e45cb11f230437/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_10:16:49_d4407a3bd13b8ec3978b9ba8e4e45cb11f230437/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-04_10:16:49_d4407a3bd13b8ec3978b9ba8e4e45cb11f230437/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 6e7db898afa342ae91ea58af8247192d3d2bb1dc..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:16:49_d4407a3bd13b8ec3978b9ba8e4e45cb11f230437/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-04_10:16:49_d4407a3bd13b8ec3978b9ba8e4e45cb11f230437/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-04_10:16:49_d4407a3bd13b8ec3978b9ba8e4e45cb11f230437/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-09-04_10:16:49_d4407a3bd13b8ec3978b9ba8e4e45cb11f230437/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:16:49_d4407a3bd13b8ec3978b9ba8e4e45cb11f230437/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-04_10:16:49_d4407a3bd13b8ec3978b9ba8e4e45cb11f230437/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-04_10:16:49_d4407a3bd13b8ec3978b9ba8e4e45cb11f230437/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:16:49_d4407a3bd13b8ec3978b9ba8e4e45cb11f230437/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_10:16:49_d4407a3bd13b8ec3978b9ba8e4e45cb11f230437/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-04_10:16:49_d4407a3bd13b8ec3978b9ba8e4e45cb11f230437/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 207b58849077443c91a66eefe9cc7a28035e8e29..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:16:49_d4407a3bd13b8ec3978b9ba8e4e45cb11f230437/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.348352,0.00389,257.0,0.497,201.0 diff --git a/raw_results/2023-09-04_10:16:49_d4407a3bd13b8ec3978b9ba8e4e45cb11f230437/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-04_10:16:49_d4407a3bd13b8ec3978b9ba8e4e45cb11f230437/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index b6c3dce40484d6b5667055c3e1310d3833bdfd17..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-09-04_10:16:49_d4407a3bd13b8ec3978b9ba8e4e45cb11f230437/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-04 10:55:27,121][benchmark][INFO] - Configuring inference benchmark -[2023-09-04 10:55:27,123][benchmark][INFO] - + Setting seed(42) -[2023-09-04 10:55:28,798][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-04 10:55:28,798][backend][INFO] - Configuring pytorch backend -[2023-09-04 10:55:28,798][backend][INFO] - + Checking initial device isolation -[2023-09-04 10:55:28,798][backend][INFO] - + Checking contineous device isolation -[2023-09-04 10:55:28,799][pytorch][INFO] - + Disabling gradients -[2023-09-04 10:55:28,799][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-04 10:55:29,475][pytorch][INFO] - + Turning on eval mode -[2023-09-04 10:55:29,476][inference][INFO] - Running inference benchmark -[2023-09-04 10:55:29,696][inference][INFO] - + Tracking forward pass peak memory -[2023-09-04 10:55:29,745][inference][INFO] - + Forward pass peak memory: 469.348352 (MB) -[2023-09-04 10:55:29,746][inference][INFO] - + Warming up the forward pass -[2023-09-04 10:55:29,785][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-04 10:55:34,828][inference][INFO] - + Forward pass latency: 3.89e-03 (s) -[2023-09-04 10:55:34,829][inference][INFO] - + Forward pass throughput: 257.00 (samples/s) -[2023-09-04 10:55:34,830][inference][INFO] - + Warming up the generation pass -[2023-09-04 10:55:35,423][inference][INFO] - + Tracking generation latency and throughput -[2023-09-04 10:55:40,893][inference][INFO] - + Generation pass latency: 4.97e-01 (s) -[2023-09-04 10:55:40,894][inference][INFO] - + Generation pass throughput: 201.00 (tokens/s) -[2023-09-04 10:55:40,894][inference][INFO] - Saving inference results -[2023-09-04 10:55:40,906][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-04_10:28:21_604a6c51ae0b4ce5e8213ea86ed9c71373223a5d/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-04_10:28:21_604a6c51ae0b4ce5e8213ea86ed9c71373223a5d/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:28:21_604a6c51ae0b4ce5e8213ea86ed9c71373223a5d/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_10:28:21_604a6c51ae0b4ce5e8213ea86ed9c71373223a5d/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-04_10:28:21_604a6c51ae0b4ce5e8213ea86ed9c71373223a5d/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 9e9cd41bbb304003c771bc02feb355ce1c627a53..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:28:21_604a6c51ae0b4ce5e8213ea86ed9c71373223a5d/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-04_10:28:21_604a6c51ae0b4ce5e8213ea86ed9c71373223a5d/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-04_10:28:21_604a6c51ae0b4ce5e8213ea86ed9c71373223a5d/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-04_10:28:21_604a6c51ae0b4ce5e8213ea86ed9c71373223a5d/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:28:21_604a6c51ae0b4ce5e8213ea86ed9c71373223a5d/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-04_10:28:21_604a6c51ae0b4ce5e8213ea86ed9c71373223a5d/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-04_10:28:21_604a6c51ae0b4ce5e8213ea86ed9c71373223a5d/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:28:21_604a6c51ae0b4ce5e8213ea86ed9c71373223a5d/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_10:28:21_604a6c51ae0b4ce5e8213ea86ed9c71373223a5d/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-04_10:28:21_604a6c51ae0b4ce5e8213ea86ed9c71373223a5d/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index ec487c39f1a795265922a4a9c03d8406089c223d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:28:21_604a6c51ae0b4ce5e8213ea86ed9c71373223a5d/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.50048,0.00676,148.0 diff --git a/raw_results/2023-09-04_10:28:21_604a6c51ae0b4ce5e8213ea86ed9c71373223a5d/pytorch_bert_inference/0/main.log b/raw_results/2023-09-04_10:28:21_604a6c51ae0b4ce5e8213ea86ed9c71373223a5d/pytorch_bert_inference/0/main.log deleted file mode 100644 index 1317b1232a18f7621a69b9ac1ebb0fa0b2294133..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:28:21_604a6c51ae0b4ce5e8213ea86ed9c71373223a5d/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-04 10:56:48,960][benchmark][INFO] - Configuring inference benchmark -[2023-09-04 10:56:48,961][benchmark][INFO] - + Setting seed(42) -[2023-09-04 10:56:50,299][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-04 10:56:50,299][backend][INFO] - Configuring pytorch backend -[2023-09-04 10:56:50,300][backend][INFO] - + Checking initial device isolation -[2023-09-04 10:56:50,300][backend][INFO] - + Checking contineous device isolation -[2023-09-04 10:56:50,300][pytorch][INFO] - + Disabling gradients -[2023-09-04 10:56:50,300][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-04 10:56:50,919][pytorch][INFO] - + Turning on eval mode -[2023-09-04 10:56:50,920][inference][INFO] - Running inference benchmark -[2023-09-04 10:56:51,038][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 10:56:51,039][inference][INFO] - + Tracking forward pass peak 
memory -[2023-09-04 10:56:51,107][inference][INFO] - + Forward pass peak memory: 468.50048 (MB) -[2023-09-04 10:56:51,109][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 10:56:51,110][inference][INFO] - + Warming up the forward pass -[2023-09-04 10:56:51,183][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-04 10:56:56,208][inference][INFO] - + Forward pass latency: 6.76e-03 (s) -[2023-09-04 10:56:56,209][inference][INFO] - + Forward pass throughput: 148.00 (samples/s) -[2023-09-04 10:56:56,209][inference][INFO] - Saving inference results -[2023-09-04 10:56:56,220][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-04_10:28:21_604a6c51ae0b4ce5e8213ea86ed9c71373223a5d/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-04_10:28:21_604a6c51ae0b4ce5e8213ea86ed9c71373223a5d/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:28:21_604a6c51ae0b4ce5e8213ea86ed9c71373223a5d/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_10:28:21_604a6c51ae0b4ce5e8213ea86ed9c71373223a5d/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-04_10:28:21_604a6c51ae0b4ce5e8213ea86ed9c71373223a5d/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 1ad937fb6e3d5d8e79b2f12caad8acf08023995e..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:28:21_604a6c51ae0b4ce5e8213ea86ed9c71373223a5d/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-04_10:28:21_604a6c51ae0b4ce5e8213ea86ed9c71373223a5d/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-04_10:28:21_604a6c51ae0b4ce5e8213ea86ed9c71373223a5d/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-09-04_10:28:21_604a6c51ae0b4ce5e8213ea86ed9c71373223a5d/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:28:21_604a6c51ae0b4ce5e8213ea86ed9c71373223a5d/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-04_10:28:21_604a6c51ae0b4ce5e8213ea86ed9c71373223a5d/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-04_10:28:21_604a6c51ae0b4ce5e8213ea86ed9c71373223a5d/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:28:21_604a6c51ae0b4ce5e8213ea86ed9c71373223a5d/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_10:28:21_604a6c51ae0b4ce5e8213ea86ed9c71373223a5d/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-04_10:28:21_604a6c51ae0b4ce5e8213ea86ed9c71373223a5d/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 74353357ef4fb51c3822ebf3aae2dad136247fd8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:28:21_604a6c51ae0b4ce5e8213ea86ed9c71373223a5d/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,469.50809599999997,0.0051,784.0 diff --git a/raw_results/2023-09-04_10:28:21_604a6c51ae0b4ce5e8213ea86ed9c71373223a5d/pytorch_bert_inference/1/main.log b/raw_results/2023-09-04_10:28:21_604a6c51ae0b4ce5e8213ea86ed9c71373223a5d/pytorch_bert_inference/1/main.log deleted file mode 100644 index 6f474e7df9549c9d850e64be6745a7d4646938bb..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-09-04_10:28:21_604a6c51ae0b4ce5e8213ea86ed9c71373223a5d/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-04 10:56:56,602][benchmark][INFO] - Configuring inference benchmark -[2023-09-04 10:56:56,603][benchmark][INFO] - + Setting seed(42) -[2023-09-04 10:56:57,045][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-04 10:56:57,045][backend][INFO] - Configuring pytorch backend -[2023-09-04 10:56:57,046][backend][INFO] - + Checking initial device isolation -[2023-09-04 10:56:57,046][backend][INFO] - + Checking contineous device isolation -[2023-09-04 10:56:57,046][pytorch][INFO] - + Disabling gradients -[2023-09-04 10:56:57,046][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-04 10:56:57,302][pytorch][INFO] - + Turning on eval mode -[2023-09-04 10:56:57,303][inference][INFO] - Running inference benchmark -[2023-09-04 10:56:57,433][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 10:56:57,434][inference][INFO] - + Tracking forward pass peak memory -[2023-09-04 10:56:57,488][inference][INFO] - + Forward pass peak memory: 469.50809599999997 (MB) -[2023-09-04 10:56:57,489][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 10:56:57,491][inference][INFO] - + Warming up the forward pass -[2023-09-04 10:56:57,553][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-04 10:57:02,583][inference][INFO] - + Forward pass latency: 5.10e-03 (s) -[2023-09-04 10:57:02,584][inference][INFO] - + Forward pass throughput: 784.00 (samples/s) -[2023-09-04 10:57:02,584][inference][INFO] - Saving inference results -[2023-09-04 10:57:02,592][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-04_10:28:21_604a6c51ae0b4ce5e8213ea86ed9c71373223a5d/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-04_10:28:21_604a6c51ae0b4ce5e8213ea86ed9c71373223a5d/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:28:21_604a6c51ae0b4ce5e8213ea86ed9c71373223a5d/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference 
-model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_10:28:21_604a6c51ae0b4ce5e8213ea86ed9c71373223a5d/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-04_10:28:21_604a6c51ae0b4ce5e8213ea86ed9c71373223a5d/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index ddb774a0ea8cd6dbbb2324b524c617447b9e04bd..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:28:21_604a6c51ae0b4ce5e8213ea86ed9c71373223a5d/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-04_10:28:21_604a6c51ae0b4ce5e8213ea86ed9c71373223a5d/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-04_10:28:21_604a6c51ae0b4ce5e8213ea86ed9c71373223a5d/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-09-04_10:28:21_604a6c51ae0b4ce5e8213ea86ed9c71373223a5d/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:28:21_604a6c51ae0b4ce5e8213ea86ed9c71373223a5d/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-04_10:28:21_604a6c51ae0b4ce5e8213ea86ed9c71373223a5d/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-04_10:28:21_604a6c51ae0b4ce5e8213ea86ed9c71373223a5d/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:28:21_604a6c51ae0b4ce5e8213ea86ed9c71373223a5d/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_10:28:21_604a6c51ae0b4ce5e8213ea86ed9c71373223a5d/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-04_10:28:21_604a6c51ae0b4ce5e8213ea86ed9c71373223a5d/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index e00e2dc87930bd10b2a6a31af2233652a7f8522c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:28:21_604a6c51ae0b4ce5e8213ea86ed9c71373223a5d/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.68012799999997,0.00463,216.0,0.493,203.0 diff --git a/raw_results/2023-09-04_10:28:21_604a6c51ae0b4ce5e8213ea86ed9c71373223a5d/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-04_10:28:21_604a6c51ae0b4ce5e8213ea86ed9c71373223a5d/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index df0d355613f009cdd3c4f9ccaea6e79834ab3711..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:28:21_604a6c51ae0b4ce5e8213ea86ed9c71373223a5d/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-04 10:57:07,445][benchmark][INFO] - Configuring inference benchmark -[2023-09-04 10:57:07,446][benchmark][INFO] - + Setting seed(42) -[2023-09-04 10:57:08,999][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-04 10:57:09,000][backend][INFO] - Configuring pytorch backend -[2023-09-04 10:57:09,000][backend][INFO] - + Checking initial device isolation -[2023-09-04 10:57:09,000][backend][INFO] - + Checking contineous device isolation -[2023-09-04 10:57:09,000][pytorch][INFO] - + Disabling gradients -[2023-09-04 10:57:09,001][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-04 10:57:09,645][pytorch][INFO] - + Turning on eval mode -[2023-09-04 10:57:09,646][inference][INFO] - Running inference benchmark -[2023-09-04 10:57:09,888][inference][INFO] - + Tracking forward pass peak memory -[2023-09-04 10:57:09,938][inference][INFO] - + Forward pass peak memory: 469.68012799999997 (MB) -[2023-09-04 10:57:09,941][inference][INFO] - + Warming up the 
forward pass -[2023-09-04 10:57:09,991][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-04 10:57:15,031][inference][INFO] - + Forward pass latency: 4.63e-03 (s) -[2023-09-04 10:57:15,033][inference][INFO] - + Forward pass throughput: 216.00 (samples/s) -[2023-09-04 10:57:15,033][inference][INFO] - + Warming up the generation pass -[2023-09-04 10:57:15,581][inference][INFO] - + Tracking generation latency and throughput -[2023-09-04 10:57:21,008][inference][INFO] - + Generation pass latency: 4.93e-01 (s) -[2023-09-04 10:57:21,009][inference][INFO] - + Generation pass throughput: 203.00 (tokens/s) -[2023-09-04 10:57:21,009][inference][INFO] - Saving inference results -[2023-09-04 10:57:21,021][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-04_10:53:41_f435003e0c2dd152a2117d11c0ab6fcd4f2d84c0/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-04_10:53:41_f435003e0c2dd152a2117d11c0ab6fcd4f2d84c0/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:53:41_f435003e0c2dd152a2117d11c0ab6fcd4f2d84c0/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_10:53:41_f435003e0c2dd152a2117d11c0ab6fcd4f2d84c0/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-04_10:53:41_f435003e0c2dd152a2117d11c0ab6fcd4f2d84c0/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index c484da065ac9cb4c968e30c7c02cb9104f9c4e89..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:53:41_f435003e0c2dd152a2117d11c0ab6fcd4f2d84c0/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-04_10:53:41_f435003e0c2dd152a2117d11c0ab6fcd4f2d84c0/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-09-04_10:53:41_f435003e0c2dd152a2117d11c0ab6fcd4f2d84c0/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-04_10:53:41_f435003e0c2dd152a2117d11c0ab6fcd4f2d84c0/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:53:41_f435003e0c2dd152a2117d11c0ab6fcd4f2d84c0/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-04_10:53:41_f435003e0c2dd152a2117d11c0ab6fcd4f2d84c0/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-04_10:53:41_f435003e0c2dd152a2117d11c0ab6fcd4f2d84c0/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:53:41_f435003e0c2dd152a2117d11c0ab6fcd4f2d84c0/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_10:53:41_f435003e0c2dd152a2117d11c0ab6fcd4f2d84c0/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-04_10:53:41_f435003e0c2dd152a2117d11c0ab6fcd4f2d84c0/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index df2e502ca083ced2e09aa8fe14d3caa34f83ac4e..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:53:41_f435003e0c2dd152a2117d11c0ab6fcd4f2d84c0/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.627456,0.00687,146.0 diff --git a/raw_results/2023-09-04_10:53:41_f435003e0c2dd152a2117d11c0ab6fcd4f2d84c0/pytorch_bert_inference/0/main.log b/raw_results/2023-09-04_10:53:41_f435003e0c2dd152a2117d11c0ab6fcd4f2d84c0/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
12eb712e763e172b0bee6e858dab7e8687a0de54..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:53:41_f435003e0c2dd152a2117d11c0ab6fcd4f2d84c0/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-04 12:59:07,174][benchmark][INFO] - Configuring inference benchmark -[2023-09-04 12:59:07,175][benchmark][INFO] - + Setting seed(42) -[2023-09-04 12:59:08,428][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-04 12:59:08,428][backend][INFO] - Configuring pytorch backend -[2023-09-04 12:59:08,429][backend][INFO] - + Checking initial device isolation -[2023-09-04 12:59:08,429][backend][INFO] - + Checking contineous device isolation -[2023-09-04 12:59:08,429][pytorch][INFO] - + Disabling gradients -[2023-09-04 12:59:08,429][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-04 12:59:09,052][pytorch][INFO] - + Turning on eval mode -[2023-09-04 12:59:09,053][inference][INFO] - Running inference benchmark -[2023-09-04 12:59:09,177][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 12:59:09,178][inference][INFO] - + Tracking forward pass peak memory -[2023-09-04 12:59:09,244][inference][INFO] - + Forward pass peak memory: 468.627456 (MB) -[2023-09-04 12:59:09,245][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 12:59:09,246][inference][INFO] - + Warming up the forward pass -[2023-09-04 12:59:09,307][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-04 12:59:14,331][inference][INFO] - + Forward pass latency: 6.87e-03 (s) -[2023-09-04 12:59:14,332][inference][INFO] - + Forward pass throughput: 146.00 (samples/s) -[2023-09-04 12:59:14,333][inference][INFO] - Saving inference results -[2023-09-04 12:59:14,346][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-04_10:53:41_f435003e0c2dd152a2117d11c0ab6fcd4f2d84c0/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-04_10:53:41_f435003e0c2dd152a2117d11c0ab6fcd4f2d84c0/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:53:41_f435003e0c2dd152a2117d11c0ab6fcd4f2d84c0/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_10:53:41_f435003e0c2dd152a2117d11c0ab6fcd4f2d84c0/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-04_10:53:41_f435003e0c2dd152a2117d11c0ab6fcd4f2d84c0/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index a52b4f3927bcf8b1d848514349c1e80f683511ea..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:53:41_f435003e0c2dd152a2117d11c0ab6fcd4f2d84c0/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-04_10:53:41_f435003e0c2dd152a2117d11c0ab6fcd4f2d84c0/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-04_10:53:41_f435003e0c2dd152a2117d11c0ab6fcd4f2d84c0/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-09-04_10:53:41_f435003e0c2dd152a2117d11c0ab6fcd4f2d84c0/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:53:41_f435003e0c2dd152a2117d11c0ab6fcd4f2d84c0/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-04_10:53:41_f435003e0c2dd152a2117d11c0ab6fcd4f2d84c0/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-04_10:53:41_f435003e0c2dd152a2117d11c0ab6fcd4f2d84c0/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:53:41_f435003e0c2dd152a2117d11c0ab6fcd4f2d84c0/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_10:53:41_f435003e0c2dd152a2117d11c0ab6fcd4f2d84c0/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-04_10:53:41_f435003e0c2dd152a2117d11c0ab6fcd4f2d84c0/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 9d9f22513af418ed740e855448f6c340c3a00572..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:53:41_f435003e0c2dd152a2117d11c0ab6fcd4f2d84c0/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,469.61459199999996,0.0063,635.0 diff --git a/raw_results/2023-09-04_10:53:41_f435003e0c2dd152a2117d11c0ab6fcd4f2d84c0/pytorch_bert_inference/1/main.log b/raw_results/2023-09-04_10:53:41_f435003e0c2dd152a2117d11c0ab6fcd4f2d84c0/pytorch_bert_inference/1/main.log deleted file mode 100644 index 9cdde9e0f04cc358c6cf0cee95185e7a4b489c93..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:53:41_f435003e0c2dd152a2117d11c0ab6fcd4f2d84c0/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-04 12:59:14,742][benchmark][INFO] - Configuring inference benchmark -[2023-09-04 12:59:14,744][benchmark][INFO] - + Setting seed(42) -[2023-09-04 12:59:15,235][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-04 12:59:15,236][backend][INFO] - Configuring pytorch backend -[2023-09-04 12:59:15,236][backend][INFO] - + Checking initial device isolation -[2023-09-04 12:59:15,236][backend][INFO] - + Checking contineous device isolation -[2023-09-04 12:59:15,236][pytorch][INFO] - + Disabling gradients -[2023-09-04 12:59:15,236][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-04 12:59:15,369][pytorch][INFO] - + Turning on eval mode -[2023-09-04 12:59:15,369][inference][INFO] - Running inference benchmark -[2023-09-04 12:59:15,493][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 12:59:15,494][inference][INFO] - + Tracking forward 
pass peak memory -[2023-09-04 12:59:15,542][inference][INFO] - + Forward pass peak memory: 469.61459199999996 (MB) -[2023-09-04 12:59:15,542][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 12:59:15,544][inference][INFO] - + Warming up the forward pass -[2023-09-04 12:59:15,607][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-04 12:59:20,633][inference][INFO] - + Forward pass latency: 6.30e-03 (s) -[2023-09-04 12:59:20,634][inference][INFO] - + Forward pass throughput: 635.00 (samples/s) -[2023-09-04 12:59:20,634][inference][INFO] - Saving inference results -[2023-09-04 12:59:20,642][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-04_10:53:41_f435003e0c2dd152a2117d11c0ab6fcd4f2d84c0/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-04_10:53:41_f435003e0c2dd152a2117d11c0ab6fcd4f2d84c0/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:53:41_f435003e0c2dd152a2117d11c0ab6fcd4f2d84c0/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_10:53:41_f435003e0c2dd152a2117d11c0ab6fcd4f2d84c0/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-04_10:53:41_f435003e0c2dd152a2117d11c0ab6fcd4f2d84c0/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 03e4fb974410fa41e145f24069352d491674f5e5..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:53:41_f435003e0c2dd152a2117d11c0ab6fcd4f2d84c0/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - 
launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-04_10:53:41_f435003e0c2dd152a2117d11c0ab6fcd4f2d84c0/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-04_10:53:41_f435003e0c2dd152a2117d11c0ab6fcd4f2d84c0/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-09-04_10:53:41_f435003e0c2dd152a2117d11c0ab6fcd4f2d84c0/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:53:41_f435003e0c2dd152a2117d11c0ab6fcd4f2d84c0/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-04_10:53:41_f435003e0c2dd152a2117d11c0ab6fcd4f2d84c0/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-04_10:53:41_f435003e0c2dd152a2117d11c0ab6fcd4f2d84c0/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:53:41_f435003e0c2dd152a2117d11c0ab6fcd4f2d84c0/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_10:53:41_f435003e0c2dd152a2117d11c0ab6fcd4f2d84c0/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-04_10:53:41_f435003e0c2dd152a2117d11c0ab6fcd4f2d84c0/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 295c806ddd88b898c5482b2cf74950da7e512a68..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:53:41_f435003e0c2dd152a2117d11c0ab6fcd4f2d84c0/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.307392,0.00312,321.0,0.488,205.0 diff --git a/raw_results/2023-09-04_10:53:41_f435003e0c2dd152a2117d11c0ab6fcd4f2d84c0/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-04_10:53:41_f435003e0c2dd152a2117d11c0ab6fcd4f2d84c0/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 9e1170d5b3b72d5d0b2e41423908e7d022f43a8e..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-09-04_10:53:41_f435003e0c2dd152a2117d11c0ab6fcd4f2d84c0/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-04 12:59:25,442][benchmark][INFO] - Configuring inference benchmark -[2023-09-04 12:59:25,443][benchmark][INFO] - + Setting seed(42) -[2023-09-04 12:59:26,878][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-04 12:59:26,878][backend][INFO] - Configuring pytorch backend -[2023-09-04 12:59:26,879][backend][INFO] - + Checking initial device isolation -[2023-09-04 12:59:26,879][backend][INFO] - + Checking contineous device isolation -[2023-09-04 12:59:26,879][pytorch][INFO] - + Disabling gradients -[2023-09-04 12:59:26,879][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-04 12:59:27,523][pytorch][INFO] - + Turning on eval mode -[2023-09-04 12:59:27,523][inference][INFO] - Running inference benchmark -[2023-09-04 12:59:27,723][inference][INFO] - + Tracking forward pass peak memory -[2023-09-04 12:59:27,773][inference][INFO] - + Forward pass peak memory: 469.307392 (MB) -[2023-09-04 12:59:27,774][inference][INFO] - + Warming up the forward pass -[2023-09-04 12:59:27,808][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-04 12:59:32,857][inference][INFO] - + Forward pass latency: 3.12e-03 (s) -[2023-09-04 12:59:32,859][inference][INFO] - + Forward pass throughput: 321.00 (samples/s) -[2023-09-04 12:59:32,859][inference][INFO] - + Warming up the generation pass -[2023-09-04 12:59:33,350][inference][INFO] - + Tracking generation latency and throughput -[2023-09-04 12:59:38,715][inference][INFO] - + Generation pass latency: 4.88e-01 (s) -[2023-09-04 12:59:38,717][inference][INFO] - + Generation pass throughput: 205.00 (tokens/s) -[2023-09-04 12:59:38,717][inference][INFO] - Saving inference results -[2023-09-04 12:59:38,734][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-04_10:54:03_eb984418e2f26f749e832730b264d7762e6be8c2/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-04_10:54:03_eb984418e2f26f749e832730b264d7762e6be8c2/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:54:03_eb984418e2f26f749e832730b264d7762e6be8c2/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_10:54:03_eb984418e2f26f749e832730b264d7762e6be8c2/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-04_10:54:03_eb984418e2f26f749e832730b264d7762e6be8c2/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index b9899ac89fb7a3f048112b23df2b5af1fd478c6b..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:54:03_eb984418e2f26f749e832730b264d7762e6be8c2/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-04_10:54:03_eb984418e2f26f749e832730b264d7762e6be8c2/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-04_10:54:03_eb984418e2f26f749e832730b264d7762e6be8c2/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-04_10:54:03_eb984418e2f26f749e832730b264d7762e6be8c2/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:54:03_eb984418e2f26f749e832730b264d7762e6be8c2/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-04_10:54:03_eb984418e2f26f749e832730b264d7762e6be8c2/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-04_10:54:03_eb984418e2f26f749e832730b264d7762e6be8c2/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:54:03_eb984418e2f26f749e832730b264d7762e6be8c2/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_10:54:03_eb984418e2f26f749e832730b264d7762e6be8c2/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-04_10:54:03_eb984418e2f26f749e832730b264d7762e6be8c2/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index e33d4d6d05bdc4d4bb5e530cd8a2d1aca74bd494..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:54:03_eb984418e2f26f749e832730b264d7762e6be8c2/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.267008,0.00593,169.0 diff --git a/raw_results/2023-09-04_10:54:03_eb984418e2f26f749e832730b264d7762e6be8c2/pytorch_bert_inference/0/main.log b/raw_results/2023-09-04_10:54:03_eb984418e2f26f749e832730b264d7762e6be8c2/pytorch_bert_inference/0/main.log deleted file mode 100644 index cc3a7eb2706a716a5034ffdc411bf809a4cc526a..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:54:03_eb984418e2f26f749e832730b264d7762e6be8c2/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-04 13:00:49,626][benchmark][INFO] - Configuring inference benchmark -[2023-09-04 13:00:49,627][benchmark][INFO] - + Setting seed(42) -[2023-09-04 13:00:50,882][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-04 13:00:50,882][backend][INFO] - Configuring pytorch backend -[2023-09-04 13:00:50,882][backend][INFO] - + Checking initial device isolation -[2023-09-04 13:00:50,882][backend][INFO] - + Checking contineous device isolation -[2023-09-04 13:00:50,883][pytorch][INFO] - + Disabling gradients -[2023-09-04 13:00:50,883][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-04 13:00:51,500][pytorch][INFO] - + Turning on eval mode -[2023-09-04 13:00:51,501][inference][INFO] - Running inference benchmark -[2023-09-04 13:00:51,628][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 13:00:51,629][inference][INFO] - + Tracking forward pass peak 
memory -[2023-09-04 13:00:51,699][inference][INFO] - + Forward pass peak memory: 468.267008 (MB) -[2023-09-04 13:00:51,700][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 13:00:51,702][inference][INFO] - + Warming up the forward pass -[2023-09-04 13:00:51,763][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-04 13:00:56,787][inference][INFO] - + Forward pass latency: 5.93e-03 (s) -[2023-09-04 13:00:56,788][inference][INFO] - + Forward pass throughput: 169.00 (samples/s) -[2023-09-04 13:00:56,788][inference][INFO] - Saving inference results -[2023-09-04 13:00:56,800][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-04_10:54:03_eb984418e2f26f749e832730b264d7762e6be8c2/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-04_10:54:03_eb984418e2f26f749e832730b264d7762e6be8c2/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:54:03_eb984418e2f26f749e832730b264d7762e6be8c2/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_10:54:03_eb984418e2f26f749e832730b264d7762e6be8c2/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-04_10:54:03_eb984418e2f26f749e832730b264d7762e6be8c2/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index d7015837cdc7102bbb999509f5ee7bfbb0d47d2f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:54:03_eb984418e2f26f749e832730b264d7762e6be8c2/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-04_10:54:03_eb984418e2f26f749e832730b264d7762e6be8c2/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-04_10:54:03_eb984418e2f26f749e832730b264d7762e6be8c2/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-09-04_10:54:03_eb984418e2f26f749e832730b264d7762e6be8c2/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:54:03_eb984418e2f26f749e832730b264d7762e6be8c2/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-04_10:54:03_eb984418e2f26f749e832730b264d7762e6be8c2/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-04_10:54:03_eb984418e2f26f749e832730b264d7762e6be8c2/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:54:03_eb984418e2f26f749e832730b264d7762e6be8c2/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_10:54:03_eb984418e2f26f749e832730b264d7762e6be8c2/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-04_10:54:03_eb984418e2f26f749e832730b264d7762e6be8c2/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 4ed7f78b500842d0b91f4358bdc4578e74862a24..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:54:03_eb984418e2f26f749e832730b264d7762e6be8c2/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,469.21727999999996,0.00507,789.0 diff --git a/raw_results/2023-09-04_10:54:03_eb984418e2f26f749e832730b264d7762e6be8c2/pytorch_bert_inference/1/main.log b/raw_results/2023-09-04_10:54:03_eb984418e2f26f749e832730b264d7762e6be8c2/pytorch_bert_inference/1/main.log deleted file mode 100644 index 489b6e094205344dc471b89dbcb53f4d0941c045..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-09-04_10:54:03_eb984418e2f26f749e832730b264d7762e6be8c2/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-04 13:00:57,186][benchmark][INFO] - Configuring inference benchmark -[2023-09-04 13:00:57,187][benchmark][INFO] - + Setting seed(42) -[2023-09-04 13:00:57,745][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-04 13:00:57,745][backend][INFO] - Configuring pytorch backend -[2023-09-04 13:00:57,746][backend][INFO] - + Checking initial device isolation -[2023-09-04 13:00:57,746][backend][INFO] - + Checking contineous device isolation -[2023-09-04 13:00:57,746][pytorch][INFO] - + Disabling gradients -[2023-09-04 13:00:57,746][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-04 13:00:57,864][pytorch][INFO] - + Turning on eval mode -[2023-09-04 13:00:57,864][inference][INFO] - Running inference benchmark -[2023-09-04 13:00:57,985][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 13:00:57,986][inference][INFO] - + Tracking forward pass peak memory -[2023-09-04 13:00:58,033][inference][INFO] - + Forward pass peak memory: 469.21727999999996 (MB) -[2023-09-04 13:00:58,034][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 13:00:58,036][inference][INFO] - + Warming up the forward pass -[2023-09-04 13:00:58,088][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-04 13:01:03,118][inference][INFO] - + Forward pass latency: 5.07e-03 (s) -[2023-09-04 13:01:03,119][inference][INFO] - + Forward pass throughput: 789.00 (samples/s) -[2023-09-04 13:01:03,119][inference][INFO] - Saving inference results -[2023-09-04 13:01:03,132][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-04_10:54:03_eb984418e2f26f749e832730b264d7762e6be8c2/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-04_10:54:03_eb984418e2f26f749e832730b264d7762e6be8c2/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:54:03_eb984418e2f26f749e832730b264d7762e6be8c2/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference 
-model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_10:54:03_eb984418e2f26f749e832730b264d7762e6be8c2/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-04_10:54:03_eb984418e2f26f749e832730b264d7762e6be8c2/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 57ed921b4f09c792e434c8f09d72d0ae95048033..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:54:03_eb984418e2f26f749e832730b264d7762e6be8c2/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-04_10:54:03_eb984418e2f26f749e832730b264d7762e6be8c2/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-04_10:54:03_eb984418e2f26f749e832730b264d7762e6be8c2/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-09-04_10:54:03_eb984418e2f26f749e832730b264d7762e6be8c2/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:54:03_eb984418e2f26f749e832730b264d7762e6be8c2/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-04_10:54:03_eb984418e2f26f749e832730b264d7762e6be8c2/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-04_10:54:03_eb984418e2f26f749e832730b264d7762e6be8c2/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:54:03_eb984418e2f26f749e832730b264d7762e6be8c2/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_10:54:03_eb984418e2f26f749e832730b264d7762e6be8c2/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-04_10:54:03_eb984418e2f26f749e832730b264d7762e6be8c2/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index d6057dbecfa67e6a5f7da66985687e94df39009b..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:54:03_eb984418e2f26f749e832730b264d7762e6be8c2/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.184512,0.00325,308.0,0.491,204.0 diff --git a/raw_results/2023-09-04_10:54:03_eb984418e2f26f749e832730b264d7762e6be8c2/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-04_10:54:03_eb984418e2f26f749e832730b264d7762e6be8c2/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index e391c2dcca1b35d5cd85a902d0807aca197073ba..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_10:54:03_eb984418e2f26f749e832730b264d7762e6be8c2/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-04 13:01:07,889][benchmark][INFO] - Configuring inference benchmark -[2023-09-04 13:01:07,890][benchmark][INFO] - + Setting seed(42) -[2023-09-04 13:01:09,414][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-04 13:01:09,415][backend][INFO] - Configuring pytorch backend -[2023-09-04 13:01:09,415][backend][INFO] - + Checking initial device isolation -[2023-09-04 13:01:09,415][backend][INFO] - + Checking contineous device isolation -[2023-09-04 13:01:09,415][pytorch][INFO] - + Disabling gradients -[2023-09-04 13:01:09,415][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-04 13:01:10,061][pytorch][INFO] - + Turning on eval mode -[2023-09-04 13:01:10,062][inference][INFO] - Running inference benchmark -[2023-09-04 13:01:10,251][inference][INFO] - + Tracking forward pass peak memory -[2023-09-04 13:01:10,297][inference][INFO] - + Forward pass peak memory: 469.184512 (MB) -[2023-09-04 13:01:10,299][inference][INFO] - + Warming up the forward pass 
-[2023-09-04 13:01:10,339][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-04 13:01:15,389][inference][INFO] - + Forward pass latency: 3.25e-03 (s) -[2023-09-04 13:01:15,390][inference][INFO] - + Forward pass throughput: 308.00 (samples/s) -[2023-09-04 13:01:15,391][inference][INFO] - + Warming up the generation pass -[2023-09-04 13:01:15,896][inference][INFO] - + Tracking generation latency and throughput -[2023-09-04 13:01:21,294][inference][INFO] - + Generation pass latency: 4.91e-01 (s) -[2023-09-04 13:01:21,296][inference][INFO] - + Generation pass throughput: 204.00 (tokens/s) -[2023-09-04 13:01:21,296][inference][INFO] - Saving inference results -[2023-09-04 13:01:21,308][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-04_13:03:48_bfb1895e3346cb8a2bf2560c75d45e70edf46a47/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-04_13:03:48_bfb1895e3346cb8a2bf2560c75d45e70edf46a47/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_13:03:48_bfb1895e3346cb8a2bf2560c75d45e70edf46a47/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_13:03:48_bfb1895e3346cb8a2bf2560c75d45e70edf46a47/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-04_13:03:48_bfb1895e3346cb8a2bf2560c75d45e70edf46a47/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index e266d38a37a5bba25df3935b0207954190625d96..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_13:03:48_bfb1895e3346cb8a2bf2560c75d45e70edf46a47/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-04_13:03:48_bfb1895e3346cb8a2bf2560c75d45e70edf46a47/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-09-04_13:03:48_bfb1895e3346cb8a2bf2560c75d45e70edf46a47/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-04_13:03:48_bfb1895e3346cb8a2bf2560c75d45e70edf46a47/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_13:03:48_bfb1895e3346cb8a2bf2560c75d45e70edf46a47/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-04_13:03:48_bfb1895e3346cb8a2bf2560c75d45e70edf46a47/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-04_13:03:48_bfb1895e3346cb8a2bf2560c75d45e70edf46a47/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_13:03:48_bfb1895e3346cb8a2bf2560c75d45e70edf46a47/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_13:03:48_bfb1895e3346cb8a2bf2560c75d45e70edf46a47/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-04_13:03:48_bfb1895e3346cb8a2bf2560c75d45e70edf46a47/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index d88414505bfbb3584a0d7a65dbf2d6de12e3effa..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_13:03:48_bfb1895e3346cb8a2bf2560c75d45e70edf46a47/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.71347199999997,0.0059,169.0 diff --git a/raw_results/2023-09-04_13:03:48_bfb1895e3346cb8a2bf2560c75d45e70edf46a47/pytorch_bert_inference/0/main.log b/raw_results/2023-09-04_13:03:48_bfb1895e3346cb8a2bf2560c75d45e70edf46a47/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
ffb2a0d34a390e912856259f2e45bcaaeaa618cd..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_13:03:48_bfb1895e3346cb8a2bf2560c75d45e70edf46a47/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-04 14:50:02,404][benchmark][INFO] - Configuring inference benchmark -[2023-09-04 14:50:02,405][benchmark][INFO] - + Setting seed(42) -[2023-09-04 14:50:03,628][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-04 14:50:03,628][backend][INFO] - Configuring pytorch backend -[2023-09-04 14:50:03,628][backend][INFO] - + Checking initial device isolation -[2023-09-04 14:50:03,629][backend][INFO] - + Checking contineous device isolation -[2023-09-04 14:50:03,629][pytorch][INFO] - + Disabling gradients -[2023-09-04 14:50:03,629][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-04 14:50:04,412][pytorch][INFO] - + Turning on eval mode -[2023-09-04 14:50:04,412][inference][INFO] - Running inference benchmark -[2023-09-04 14:50:04,536][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 14:50:04,537][inference][INFO] - + Tracking forward pass peak memory -[2023-09-04 14:50:04,605][inference][INFO] - + Forward pass peak memory: 468.71347199999997 (MB) -[2023-09-04 14:50:04,606][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 14:50:04,608][inference][INFO] - + Warming up the forward pass -[2023-09-04 14:50:04,681][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-04 14:50:09,707][inference][INFO] - + Forward pass latency: 5.90e-03 (s) -[2023-09-04 14:50:09,708][inference][INFO] - + Forward pass throughput: 169.00 (samples/s) -[2023-09-04 14:50:09,708][inference][INFO] - Saving inference results -[2023-09-04 14:50:09,718][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-04_13:03:48_bfb1895e3346cb8a2bf2560c75d45e70edf46a47/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-04_13:03:48_bfb1895e3346cb8a2bf2560c75d45e70edf46a47/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_13:03:48_bfb1895e3346cb8a2bf2560c75d45e70edf46a47/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_13:03:48_bfb1895e3346cb8a2bf2560c75d45e70edf46a47/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-04_13:03:48_bfb1895e3346cb8a2bf2560c75d45e70edf46a47/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 9ca63097155351223a69410f6d35bab6872edee8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_13:03:48_bfb1895e3346cb8a2bf2560c75d45e70edf46a47/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-04_13:03:48_bfb1895e3346cb8a2bf2560c75d45e70edf46a47/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-04_13:03:48_bfb1895e3346cb8a2bf2560c75d45e70edf46a47/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-09-04_13:03:48_bfb1895e3346cb8a2bf2560c75d45e70edf46a47/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_13:03:48_bfb1895e3346cb8a2bf2560c75d45e70edf46a47/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-04_13:03:48_bfb1895e3346cb8a2bf2560c75d45e70edf46a47/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-04_13:03:48_bfb1895e3346cb8a2bf2560c75d45e70edf46a47/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_13:03:48_bfb1895e3346cb8a2bf2560c75d45e70edf46a47/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_13:03:48_bfb1895e3346cb8a2bf2560c75d45e70edf46a47/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-04_13:03:48_bfb1895e3346cb8a2bf2560c75d45e70edf46a47/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index d48157fadfc361c811f4e43e8af9b2cf0bd2634b..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_13:03:48_bfb1895e3346cb8a2bf2560c75d45e70edf46a47/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,469.655552,0.00503,795.0 diff --git a/raw_results/2023-09-04_13:03:48_bfb1895e3346cb8a2bf2560c75d45e70edf46a47/pytorch_bert_inference/1/main.log b/raw_results/2023-09-04_13:03:48_bfb1895e3346cb8a2bf2560c75d45e70edf46a47/pytorch_bert_inference/1/main.log deleted file mode 100644 index b7c42d1b999f77de0463ac612d1646d2ca189fed..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_13:03:48_bfb1895e3346cb8a2bf2560c75d45e70edf46a47/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-04 14:50:10,103][benchmark][INFO] - Configuring inference benchmark -[2023-09-04 14:50:10,104][benchmark][INFO] - + Setting seed(42) -[2023-09-04 14:50:10,557][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-04 14:50:10,557][backend][INFO] - Configuring pytorch backend -[2023-09-04 14:50:10,558][backend][INFO] - + Checking initial device isolation -[2023-09-04 14:50:10,558][backend][INFO] - + Checking contineous device isolation -[2023-09-04 14:50:10,558][pytorch][INFO] - + Disabling gradients -[2023-09-04 14:50:10,558][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-04 14:50:10,675][pytorch][INFO] - + Turning on eval mode -[2023-09-04 14:50:10,676][inference][INFO] - Running inference benchmark -[2023-09-04 14:50:10,798][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 14:50:10,799][inference][INFO] - + Tracking forward pass peak 
memory -[2023-09-04 14:50:10,843][inference][INFO] - + Forward pass peak memory: 469.655552 (MB) -[2023-09-04 14:50:10,844][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 14:50:10,846][inference][INFO] - + Warming up the forward pass -[2023-09-04 14:50:10,896][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-04 14:50:15,924][inference][INFO] - + Forward pass latency: 5.03e-03 (s) -[2023-09-04 14:50:15,925][inference][INFO] - + Forward pass throughput: 795.00 (samples/s) -[2023-09-04 14:50:15,925][inference][INFO] - Saving inference results -[2023-09-04 14:50:15,932][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-04_13:03:48_bfb1895e3346cb8a2bf2560c75d45e70edf46a47/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-04_13:03:48_bfb1895e3346cb8a2bf2560c75d45e70edf46a47/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_13:03:48_bfb1895e3346cb8a2bf2560c75d45e70edf46a47/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_13:03:48_bfb1895e3346cb8a2bf2560c75d45e70edf46a47/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-04_13:03:48_bfb1895e3346cb8a2bf2560c75d45e70edf46a47/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index cf64638022bac223309edec88c14a69f018512d6..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_13:03:48_bfb1895e3346cb8a2bf2560c75d45e70edf46a47/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-04_13:03:48_bfb1895e3346cb8a2bf2560c75d45e70edf46a47/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-04_13:03:48_bfb1895e3346cb8a2bf2560c75d45e70edf46a47/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-09-04_13:03:48_bfb1895e3346cb8a2bf2560c75d45e70edf46a47/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_13:03:48_bfb1895e3346cb8a2bf2560c75d45e70edf46a47/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-04_13:03:48_bfb1895e3346cb8a2bf2560c75d45e70edf46a47/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-04_13:03:48_bfb1895e3346cb8a2bf2560c75d45e70edf46a47/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_13:03:48_bfb1895e3346cb8a2bf2560c75d45e70edf46a47/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_13:03:48_bfb1895e3346cb8a2bf2560c75d45e70edf46a47/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-04_13:03:48_bfb1895e3346cb8a2bf2560c75d45e70edf46a47/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 43512935d26a44fe8f739cb44bf0deb695bde7f5..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_13:03:48_bfb1895e3346cb8a2bf2560c75d45e70edf46a47/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.79071999999996,0.0039,256.0,0.505,198.0 diff --git a/raw_results/2023-09-04_13:03:48_bfb1895e3346cb8a2bf2560c75d45e70edf46a47/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-04_13:03:48_bfb1895e3346cb8a2bf2560c75d45e70edf46a47/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 2d4f671ec3cb2a6fddf0785b4e2768d9f3922d9f..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-09-04_13:03:48_bfb1895e3346cb8a2bf2560c75d45e70edf46a47/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-04 14:50:20,830][benchmark][INFO] - Configuring inference benchmark -[2023-09-04 14:50:20,831][benchmark][INFO] - + Setting seed(42) -[2023-09-04 14:50:22,429][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-04 14:50:22,430][backend][INFO] - Configuring pytorch backend -[2023-09-04 14:50:22,430][backend][INFO] - + Checking initial device isolation -[2023-09-04 14:50:22,430][backend][INFO] - + Checking contineous device isolation -[2023-09-04 14:50:22,430][pytorch][INFO] - + Disabling gradients -[2023-09-04 14:50:22,431][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-04 14:50:23,135][pytorch][INFO] - + Turning on eval mode -[2023-09-04 14:50:23,136][inference][INFO] - Running inference benchmark -[2023-09-04 14:50:23,336][inference][INFO] - + Tracking forward pass peak memory -[2023-09-04 14:50:23,391][inference][INFO] - + Forward pass peak memory: 469.79071999999996 (MB) -[2023-09-04 14:50:23,393][inference][INFO] - + Warming up the forward pass -[2023-09-04 14:50:23,427][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-04 14:50:28,475][inference][INFO] - + Forward pass latency: 3.90e-03 (s) -[2023-09-04 14:50:28,476][inference][INFO] - + Forward pass throughput: 256.00 (samples/s) -[2023-09-04 14:50:28,477][inference][INFO] - + Warming up the generation pass -[2023-09-04 14:50:29,024][inference][INFO] - + Tracking generation latency and throughput -[2023-09-04 14:50:34,078][inference][INFO] - + Generation pass latency: 5.05e-01 (s) -[2023-09-04 14:50:34,080][inference][INFO] - + Generation pass throughput: 198.00 (tokens/s) -[2023-09-04 14:50:34,080][inference][INFO] - Saving inference results -[2023-09-04 14:50:34,092][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-04_14:11:00_7cd01d4e384f7ce9c18a81a4decb2c2531542661/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-04_14:11:00_7cd01d4e384f7ce9c18a81a4decb2c2531542661/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_14:11:00_7cd01d4e384f7ce9c18a81a4decb2c2531542661/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_14:11:00_7cd01d4e384f7ce9c18a81a4decb2c2531542661/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-04_14:11:00_7cd01d4e384f7ce9c18a81a4decb2c2531542661/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 271a47fc80ce9b0d68eb2e8c3949dd681462acf4..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_14:11:00_7cd01d4e384f7ce9c18a81a4decb2c2531542661/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-04_14:11:00_7cd01d4e384f7ce9c18a81a4decb2c2531542661/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-04_14:11:00_7cd01d4e384f7ce9c18a81a4decb2c2531542661/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-04_14:11:00_7cd01d4e384f7ce9c18a81a4decb2c2531542661/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_14:11:00_7cd01d4e384f7ce9c18a81a4decb2c2531542661/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-04_14:11:00_7cd01d4e384f7ce9c18a81a4decb2c2531542661/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-04_14:11:00_7cd01d4e384f7ce9c18a81a4decb2c2531542661/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_14:11:00_7cd01d4e384f7ce9c18a81a4decb2c2531542661/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_14:11:00_7cd01d4e384f7ce9c18a81a4decb2c2531542661/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-04_14:11:00_7cd01d4e384f7ce9c18a81a4decb2c2531542661/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 4c1a2c6f98c3ba7c32b991e44985423e4776659c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_14:11:00_7cd01d4e384f7ce9c18a81a4decb2c2531542661/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.721664,0.00697,143.0 diff --git a/raw_results/2023-09-04_14:11:00_7cd01d4e384f7ce9c18a81a4decb2c2531542661/pytorch_bert_inference/0/main.log b/raw_results/2023-09-04_14:11:00_7cd01d4e384f7ce9c18a81a4decb2c2531542661/pytorch_bert_inference/0/main.log deleted file mode 100644 index 0d1a712dadeb4f1b313a28f5e9def6b542904939..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_14:11:00_7cd01d4e384f7ce9c18a81a4decb2c2531542661/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-04 14:51:43,157][benchmark][INFO] - Configuring inference benchmark -[2023-09-04 14:51:43,158][benchmark][INFO] - + Setting seed(42) -[2023-09-04 14:51:44,379][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-04 14:51:44,379][backend][INFO] - Configuring pytorch backend -[2023-09-04 14:51:44,380][backend][INFO] - + Checking initial device isolation -[2023-09-04 14:51:44,380][backend][INFO] - + Checking contineous device isolation -[2023-09-04 14:51:44,380][pytorch][INFO] - + Disabling gradients -[2023-09-04 14:51:44,380][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-04 14:51:44,991][pytorch][INFO] - + Turning on eval mode -[2023-09-04 14:51:44,992][inference][INFO] - Running inference benchmark -[2023-09-04 14:51:45,113][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 14:51:45,114][inference][INFO] - + Tracking forward pass peak 
memory -[2023-09-04 14:51:45,190][inference][INFO] - + Forward pass peak memory: 468.721664 (MB) -[2023-09-04 14:51:45,191][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 14:51:45,193][inference][INFO] - + Warming up the forward pass -[2023-09-04 14:51:45,266][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-04 14:51:50,286][inference][INFO] - + Forward pass latency: 6.97e-03 (s) -[2023-09-04 14:51:50,287][inference][INFO] - + Forward pass throughput: 143.00 (samples/s) -[2023-09-04 14:51:50,287][inference][INFO] - Saving inference results -[2023-09-04 14:51:50,296][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-04_14:11:00_7cd01d4e384f7ce9c18a81a4decb2c2531542661/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-04_14:11:00_7cd01d4e384f7ce9c18a81a4decb2c2531542661/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_14:11:00_7cd01d4e384f7ce9c18a81a4decb2c2531542661/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_14:11:00_7cd01d4e384f7ce9c18a81a4decb2c2531542661/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-04_14:11:00_7cd01d4e384f7ce9c18a81a4decb2c2531542661/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 5ba08a14b0e5d6c485ff1587474b8792ab7f6cef..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_14:11:00_7cd01d4e384f7ce9c18a81a4decb2c2531542661/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-04_14:11:00_7cd01d4e384f7ce9c18a81a4decb2c2531542661/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-04_14:11:00_7cd01d4e384f7ce9c18a81a4decb2c2531542661/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-09-04_14:11:00_7cd01d4e384f7ce9c18a81a4decb2c2531542661/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_14:11:00_7cd01d4e384f7ce9c18a81a4decb2c2531542661/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-04_14:11:00_7cd01d4e384f7ce9c18a81a4decb2c2531542661/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-04_14:11:00_7cd01d4e384f7ce9c18a81a4decb2c2531542661/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_14:11:00_7cd01d4e384f7ce9c18a81a4decb2c2531542661/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_14:11:00_7cd01d4e384f7ce9c18a81a4decb2c2531542661/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-04_14:11:00_7cd01d4e384f7ce9c18a81a4decb2c2531542661/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 1d90bb9845c7a500b0639ab3858826817dcd5e30..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_14:11:00_7cd01d4e384f7ce9c18a81a4decb2c2531542661/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,469.704704,0.00603,663.0 diff --git a/raw_results/2023-09-04_14:11:00_7cd01d4e384f7ce9c18a81a4decb2c2531542661/pytorch_bert_inference/1/main.log b/raw_results/2023-09-04_14:11:00_7cd01d4e384f7ce9c18a81a4decb2c2531542661/pytorch_bert_inference/1/main.log deleted file mode 100644 index 18a533c976462f3987310b2b52c7064601d918a4..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-09-04_14:11:00_7cd01d4e384f7ce9c18a81a4decb2c2531542661/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-04 14:51:50,694][benchmark][INFO] - Configuring inference benchmark -[2023-09-04 14:51:50,696][benchmark][INFO] - + Setting seed(42) -[2023-09-04 14:51:51,253][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-04 14:51:51,254][backend][INFO] - Configuring pytorch backend -[2023-09-04 14:51:51,254][backend][INFO] - + Checking initial device isolation -[2023-09-04 14:51:51,254][backend][INFO] - + Checking continuous device isolation -[2023-09-04 14:51:51,254][pytorch][INFO] - + Disabling gradients -[2023-09-04 14:51:51,254][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-04 14:51:51,396][pytorch][INFO] - + Turning on eval mode -[2023-09-04 14:51:51,397][inference][INFO] - Running inference benchmark -[2023-09-04 14:51:51,571][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 14:51:51,572][inference][INFO] - + Tracking forward pass peak memory -[2023-09-04 14:51:51,625][inference][INFO] - + Forward pass peak memory: 469.704704 (MB) -[2023-09-04 14:51:51,626][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 14:51:51,627][inference][INFO] - + Warming up the forward pass -[2023-09-04 14:51:51,696][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-04 14:51:56,725][inference][INFO] - + Forward pass latency: 6.03e-03 (s) -[2023-09-04 14:51:56,726][inference][INFO] - + Forward pass throughput: 663.00 (samples/s) -[2023-09-04 14:51:56,726][inference][INFO] - Saving inference results -[2023-09-04 14:51:56,733][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-04_14:11:00_7cd01d4e384f7ce9c18a81a4decb2c2531542661/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-04_14:11:00_7cd01d4e384f7ce9c18a81a4decb2c2531542661/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_14:11:00_7cd01d4e384f7ce9c18a81a4decb2c2531542661/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model:
hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_14:11:00_7cd01d4e384f7ce9c18a81a4decb2c2531542661/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-04_14:11:00_7cd01d4e384f7ce9c18a81a4decb2c2531542661/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 22a4206e60bd04860eb916f49720295b4d1400be..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_14:11:00_7cd01d4e384f7ce9c18a81a4decb2c2531542661/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-04_14:11:00_7cd01d4e384f7ce9c18a81a4decb2c2531542661/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-04_14:11:00_7cd01d4e384f7ce9c18a81a4decb2c2531542661/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-09-04_14:11:00_7cd01d4e384f7ce9c18a81a4decb2c2531542661/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_14:11:00_7cd01d4e384f7ce9c18a81a4decb2c2531542661/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-04_14:11:00_7cd01d4e384f7ce9c18a81a4decb2c2531542661/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-04_14:11:00_7cd01d4e384f7ce9c18a81a4decb2c2531542661/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_14:11:00_7cd01d4e384f7ce9c18a81a4decb2c2531542661/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_14:11:00_7cd01d4e384f7ce9c18a81a4decb2c2531542661/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-04_14:11:00_7cd01d4e384f7ce9c18a81a4decb2c2531542661/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 6d7c8dde2c85eb8fa9fc09b618007d4446cdc23b..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_14:11:00_7cd01d4e384f7ce9c18a81a4decb2c2531542661/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.17222399999997,0.00391,256.0,0.527,190.0 diff --git a/raw_results/2023-09-04_14:11:00_7cd01d4e384f7ce9c18a81a4decb2c2531542661/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-04_14:11:00_7cd01d4e384f7ce9c18a81a4decb2c2531542661/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 15856a19c38adf917b65e032364dfeeb07f75962..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_14:11:00_7cd01d4e384f7ce9c18a81a4decb2c2531542661/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-04 14:52:01,527][benchmark][INFO] - Configuring inference benchmark -[2023-09-04 14:52:01,528][benchmark][INFO] - + Setting seed(42) -[2023-09-04 14:52:02,946][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-04 14:52:02,947][backend][INFO] - Configuring pytorch backend -[2023-09-04 14:52:02,947][backend][INFO] - + Checking initial device isolation -[2023-09-04 14:52:02,947][backend][INFO] - + Checking continuous device isolation -[2023-09-04 14:52:02,947][pytorch][INFO] - + Disabling gradients -[2023-09-04 14:52:02,947][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-04 14:52:03,617][pytorch][INFO] - + Turning on eval mode -[2023-09-04 14:52:03,617][inference][INFO] - Running inference benchmark -[2023-09-04 14:52:03,815][inference][INFO] - + Tracking forward pass peak memory -[2023-09-04 14:52:03,864][inference][INFO] - + Forward pass peak memory: 469.17222399999997 (MB) -[2023-09-04 14:52:03,865][inference][INFO] - + Warming up the
forward pass -[2023-09-04 14:52:03,899][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-04 14:52:08,943][inference][INFO] - + Forward pass latency: 3.91e-03 (s) -[2023-09-04 14:52:08,945][inference][INFO] - + Forward pass throughput: 256.00 (samples/s) -[2023-09-04 14:52:08,945][inference][INFO] - + Warming up the generation pass -[2023-09-04 14:52:09,536][inference][INFO] - + Tracking generation latency and throughput -[2023-09-04 14:52:14,806][inference][INFO] - + Generation pass latency: 5.27e-01 (s) -[2023-09-04 14:52:14,807][inference][INFO] - + Generation pass throughput: 190.00 (tokens/s) -[2023-09-04 14:52:14,807][inference][INFO] - Saving inference results -[2023-09-04 14:52:14,819][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-04_16:09:26_d750eff62757a46160b6f73b95e8035c49c2971b/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-04_16:09:26_d750eff62757a46160b6f73b95e8035c49c2971b/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:09:26_d750eff62757a46160b6f73b95e8035c49c2971b/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_16:09:26_d750eff62757a46160b6f73b95e8035c49c2971b/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-04_16:09:26_d750eff62757a46160b6f73b95e8035c49c2971b/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 5a38fa2e37530c9ff6bb5d4c7375be5a3cf802cb..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:09:26_d750eff62757a46160b6f73b95e8035c49c2971b/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-04_16:09:26_d750eff62757a46160b6f73b95e8035c49c2971b/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-09-04_16:09:26_d750eff62757a46160b6f73b95e8035c49c2971b/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-04_16:09:26_d750eff62757a46160b6f73b95e8035c49c2971b/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:09:26_d750eff62757a46160b6f73b95e8035c49c2971b/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-04_16:09:26_d750eff62757a46160b6f73b95e8035c49c2971b/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-04_16:09:26_d750eff62757a46160b6f73b95e8035c49c2971b/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:09:26_d750eff62757a46160b6f73b95e8035c49c2971b/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_16:09:26_d750eff62757a46160b6f73b95e8035c49c2971b/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-04_16:09:26_d750eff62757a46160b6f73b95e8035c49c2971b/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 80999045528e0dc5333dfbbcbc6fc3a4fd6e2b09..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:09:26_d750eff62757a46160b6f73b95e8035c49c2971b/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.82406399999996,0.00583,172.0 diff --git a/raw_results/2023-09-04_16:09:26_d750eff62757a46160b6f73b95e8035c49c2971b/pytorch_bert_inference/0/main.log b/raw_results/2023-09-04_16:09:26_d750eff62757a46160b6f73b95e8035c49c2971b/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
188d3a29154a8175000796089d94324692fc3614..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:09:26_d750eff62757a46160b6f73b95e8035c49c2971b/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-04 16:49:59,326][benchmark][INFO] - Configuring inference benchmark -[2023-09-04 16:49:59,328][benchmark][INFO] - + Setting seed(42) -[2023-09-04 16:50:00,571][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-04 16:50:00,572][backend][INFO] - Configuring pytorch backend -[2023-09-04 16:50:00,572][backend][INFO] - + Checking initial device isolation -[2023-09-04 16:50:00,572][backend][INFO] - + Checking continuous device isolation -[2023-09-04 16:50:00,572][pytorch][INFO] - + Disabling gradients -[2023-09-04 16:50:00,572][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-04 16:50:01,243][pytorch][INFO] - + Turning on eval mode -[2023-09-04 16:50:01,244][inference][INFO] - Running inference benchmark -[2023-09-04 16:50:01,372][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 16:50:01,373][inference][INFO] - + Tracking forward pass peak memory -[2023-09-04 16:50:01,439][inference][INFO] - + Forward pass peak memory: 468.82406399999996 (MB) -[2023-09-04 16:50:01,441][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 16:50:01,442][inference][INFO] - + Warming up the forward pass -[2023-09-04 16:50:01,514][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-04 16:50:06,542][inference][INFO] - + Forward pass latency: 5.83e-03 (s) -[2023-09-04 16:50:06,543][inference][INFO] - + Forward pass throughput: 172.00 (samples/s) -[2023-09-04 16:50:06,543][inference][INFO] - Saving inference results -[2023-09-04 16:50:06,556][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-04_16:09:26_d750eff62757a46160b6f73b95e8035c49c2971b/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-04_16:09:26_d750eff62757a46160b6f73b95e8035c49c2971b/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:09:26_d750eff62757a46160b6f73b95e8035c49c2971b/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 -
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_16:09:26_d750eff62757a46160b6f73b95e8035c49c2971b/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-04_16:09:26_d750eff62757a46160b6f73b95e8035c49c2971b/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 71bd77be7395a125f277cec6815d03ab597c317c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:09:26_d750eff62757a46160b6f73b95e8035c49c2971b/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-04_16:09:26_d750eff62757a46160b6f73b95e8035c49c2971b/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-04_16:09:26_d750eff62757a46160b6f73b95e8035c49c2971b/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-09-04_16:09:26_d750eff62757a46160b6f73b95e8035c49c2971b/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:09:26_d750eff62757a46160b6f73b95e8035c49c2971b/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-04_16:09:26_d750eff62757a46160b6f73b95e8035c49c2971b/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-04_16:09:26_d750eff62757a46160b6f73b95e8035c49c2971b/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:09:26_d750eff62757a46160b6f73b95e8035c49c2971b/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_16:09:26_d750eff62757a46160b6f73b95e8035c49c2971b/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-04_16:09:26_d750eff62757a46160b6f73b95e8035c49c2971b/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index ecc1b5e6a556d2306fb8798675783d0c8ff3eb4b..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:09:26_d750eff62757a46160b6f73b95e8035c49c2971b/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,469.864448,0.00495,808.0 diff --git a/raw_results/2023-09-04_16:09:26_d750eff62757a46160b6f73b95e8035c49c2971b/pytorch_bert_inference/1/main.log b/raw_results/2023-09-04_16:09:26_d750eff62757a46160b6f73b95e8035c49c2971b/pytorch_bert_inference/1/main.log deleted file mode 100644 index dcae00fa35842ef3ad6e5585c834a11f3bf8342a..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:09:26_d750eff62757a46160b6f73b95e8035c49c2971b/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-04 16:50:06,933][benchmark][INFO] - Configuring inference benchmark -[2023-09-04 16:50:06,934][benchmark][INFO] - + Setting seed(42) -[2023-09-04 16:50:07,374][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-04 16:50:07,375][backend][INFO] - Configuring pytorch backend -[2023-09-04 16:50:07,375][backend][INFO] - + Checking initial device isolation -[2023-09-04 16:50:07,375][backend][INFO] - + Checking continuous device isolation -[2023-09-04 16:50:07,375][pytorch][INFO] - + Disabling gradients -[2023-09-04 16:50:07,375][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-04 16:50:07,495][pytorch][INFO] - + Turning on eval mode -[2023-09-04 16:50:07,495][inference][INFO] - Running inference benchmark -[2023-09-04 16:50:07,614][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 16:50:07,615][inference][INFO] - + Tracking forward pass peak
memory -[2023-09-04 16:50:07,661][inference][INFO] - + Forward pass peak memory: 469.864448 (MB) -[2023-09-04 16:50:07,662][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 16:50:07,664][inference][INFO] - + Warming up the forward pass -[2023-09-04 16:50:07,714][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-04 16:50:12,745][inference][INFO] - + Forward pass latency: 4.95e-03 (s) -[2023-09-04 16:50:12,746][inference][INFO] - + Forward pass throughput: 808.00 (samples/s) -[2023-09-04 16:50:12,746][inference][INFO] - Saving inference results -[2023-09-04 16:50:12,754][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-04_16:09:26_d750eff62757a46160b6f73b95e8035c49c2971b/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-04_16:09:26_d750eff62757a46160b6f73b95e8035c49c2971b/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:09:26_d750eff62757a46160b6f73b95e8035c49c2971b/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_16:09:26_d750eff62757a46160b6f73b95e8035c49c2971b/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-04_16:09:26_d750eff62757a46160b6f73b95e8035c49c2971b/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index af73e29365e7028fd283e095f81a79d5477fa121..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:09:26_d750eff62757a46160b6f73b95e8035c49c2971b/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-04_16:09:26_d750eff62757a46160b6f73b95e8035c49c2971b/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-04_16:09:26_d750eff62757a46160b6f73b95e8035c49c2971b/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-09-04_16:09:26_d750eff62757a46160b6f73b95e8035c49c2971b/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:09:26_d750eff62757a46160b6f73b95e8035c49c2971b/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-04_16:09:26_d750eff62757a46160b6f73b95e8035c49c2971b/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-04_16:09:26_d750eff62757a46160b6f73b95e8035c49c2971b/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:09:26_d750eff62757a46160b6f73b95e8035c49c2971b/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_16:09:26_d750eff62757a46160b6f73b95e8035c49c2971b/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-04_16:09:26_d750eff62757a46160b6f73b95e8035c49c2971b/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index c53f2fb71f49e1b636251d7fa0b602e3f406fd82..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:09:26_d750eff62757a46160b6f73b95e8035c49c2971b/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.929984,0.00342,292.0,0.49,204.0 diff --git a/raw_results/2023-09-04_16:09:26_d750eff62757a46160b6f73b95e8035c49c2971b/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-04_16:09:26_d750eff62757a46160b6f73b95e8035c49c2971b/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index b02038310ac9342926ca108055bec60514228def..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-09-04_16:09:26_d750eff62757a46160b6f73b95e8035c49c2971b/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-04 16:50:17,612][benchmark][INFO] - Configuring inference benchmark -[2023-09-04 16:50:17,613][benchmark][INFO] - + Setting seed(42) -[2023-09-04 16:50:19,152][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-04 16:50:19,152][backend][INFO] - Configuring pytorch backend -[2023-09-04 16:50:19,153][backend][INFO] - + Checking initial device isolation -[2023-09-04 16:50:19,153][backend][INFO] - + Checking continuous device isolation -[2023-09-04 16:50:19,153][pytorch][INFO] - + Disabling gradients -[2023-09-04 16:50:19,153][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-04 16:50:19,937][pytorch][INFO] - + Turning on eval mode -[2023-09-04 16:50:19,938][inference][INFO] - Running inference benchmark -[2023-09-04 16:50:20,139][inference][INFO] - + Tracking forward pass peak memory -[2023-09-04 16:50:20,186][inference][INFO] - + Forward pass peak memory: 469.929984 (MB) -[2023-09-04 16:50:20,188][inference][INFO] - + Warming up the forward pass -[2023-09-04 16:50:20,220][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-04 16:50:25,268][inference][INFO] - + Forward pass latency: 3.42e-03 (s) -[2023-09-04 16:50:25,270][inference][INFO] - + Forward pass throughput: 292.00 (samples/s) -[2023-09-04 16:50:25,271][inference][INFO] - + Warming up the generation pass -[2023-09-04 16:50:25,778][inference][INFO] - + Tracking generation latency and throughput -[2023-09-04 16:50:31,172][inference][INFO] - + Generation pass latency: 4.90e-01 (s) -[2023-09-04 16:50:31,173][inference][INFO] - + Generation pass throughput: 204.00 (tokens/s) -[2023-09-04 16:50:31,173][inference][INFO] - Saving inference results -[2023-09-04 16:50:31,186][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-04_16:18:34_034bc5d26ad7c0e284265d92d3da39d786138545/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-04_16:18:34_034bc5d26ad7c0e284265d92d3da39d786138545/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:18:34_034bc5d26ad7c0e284265d92d3da39d786138545/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 -
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_16:18:34_034bc5d26ad7c0e284265d92d3da39d786138545/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-04_16:18:34_034bc5d26ad7c0e284265d92d3da39d786138545/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index c6bdcdd9869418c45af3378e18e3ab3a6d062cb8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:18:34_034bc5d26ad7c0e284265d92d3da39d786138545/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-04_16:18:34_034bc5d26ad7c0e284265d92d3da39d786138545/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-04_16:18:34_034bc5d26ad7c0e284265d92d3da39d786138545/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-04_16:18:34_034bc5d26ad7c0e284265d92d3da39d786138545/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:18:34_034bc5d26ad7c0e284265d92d3da39d786138545/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-04_16:18:34_034bc5d26ad7c0e284265d92d3da39d786138545/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-04_16:18:34_034bc5d26ad7c0e284265d92d3da39d786138545/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:18:34_034bc5d26ad7c0e284265d92d3da39d786138545/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_16:18:34_034bc5d26ad7c0e284265d92d3da39d786138545/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-04_16:18:34_034bc5d26ad7c0e284265d92d3da39d786138545/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 72273daaf8fbe6d9d41c46509ec5a9d0fb370e53..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:18:34_034bc5d26ad7c0e284265d92d3da39d786138545/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.578304,0.00596,168.0 diff --git a/raw_results/2023-09-04_16:18:34_034bc5d26ad7c0e284265d92d3da39d786138545/pytorch_bert_inference/0/main.log b/raw_results/2023-09-04_16:18:34_034bc5d26ad7c0e284265d92d3da39d786138545/pytorch_bert_inference/0/main.log deleted file mode 100644 index 46910291af41c4cc1c75af31b648f4c185ad4ee2..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:18:34_034bc5d26ad7c0e284265d92d3da39d786138545/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-04 16:51:40,171][benchmark][INFO] - Configuring inference benchmark -[2023-09-04 16:51:40,172][benchmark][INFO] - + Setting seed(42) -[2023-09-04 16:51:41,400][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-04 16:51:41,401][backend][INFO] - Configuring pytorch backend -[2023-09-04 16:51:41,401][backend][INFO] - + Checking initial device isolation -[2023-09-04 16:51:41,401][backend][INFO] - + Checking contineous device isolation -[2023-09-04 16:51:41,401][pytorch][INFO] - + Disabling gradients -[2023-09-04 16:51:41,401][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-04 16:51:42,020][pytorch][INFO] - + Turning on eval mode -[2023-09-04 16:51:42,021][inference][INFO] - Running inference benchmark -[2023-09-04 16:51:42,150][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 16:51:42,151][inference][INFO] - + Tracking forward pass peak 
memory -[2023-09-04 16:51:42,217][inference][INFO] - + Forward pass peak memory: 468.578304 (MB) -[2023-09-04 16:51:42,218][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 16:51:42,220][inference][INFO] - + Warming up the forward pass -[2023-09-04 16:51:42,280][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-04 16:51:47,302][inference][INFO] - + Forward pass latency: 5.96e-03 (s) -[2023-09-04 16:51:47,303][inference][INFO] - + Forward pass throughput: 168.00 (samples/s) -[2023-09-04 16:51:47,303][inference][INFO] - Saving inference results -[2023-09-04 16:51:47,314][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-04_16:18:34_034bc5d26ad7c0e284265d92d3da39d786138545/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-04_16:18:34_034bc5d26ad7c0e284265d92d3da39d786138545/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:18:34_034bc5d26ad7c0e284265d92d3da39d786138545/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_16:18:34_034bc5d26ad7c0e284265d92d3da39d786138545/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-04_16:18:34_034bc5d26ad7c0e284265d92d3da39d786138545/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index cb79edbfb14d8afe9a86da0ad6244563587e4982..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:18:34_034bc5d26ad7c0e284265d92d3da39d786138545/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-04_16:18:34_034bc5d26ad7c0e284265d92d3da39d786138545/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-04_16:18:34_034bc5d26ad7c0e284265d92d3da39d786138545/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-09-04_16:18:34_034bc5d26ad7c0e284265d92d3da39d786138545/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:18:34_034bc5d26ad7c0e284265d92d3da39d786138545/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-04_16:18:34_034bc5d26ad7c0e284265d92d3da39d786138545/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-04_16:18:34_034bc5d26ad7c0e284265d92d3da39d786138545/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:18:34_034bc5d26ad7c0e284265d92d3da39d786138545/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_16:18:34_034bc5d26ad7c0e284265d92d3da39d786138545/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-04_16:18:34_034bc5d26ad7c0e284265d92d3da39d786138545/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 1b80b029b860eea2d0c9cc97e8439280dc84c98a..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:18:34_034bc5d26ad7c0e284265d92d3da39d786138545/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,469.630976,0.00507,789.0 diff --git a/raw_results/2023-09-04_16:18:34_034bc5d26ad7c0e284265d92d3da39d786138545/pytorch_bert_inference/1/main.log b/raw_results/2023-09-04_16:18:34_034bc5d26ad7c0e284265d92d3da39d786138545/pytorch_bert_inference/1/main.log deleted file mode 100644 index b6a5f741bdbc4c2e0211fc2ebbe048729c0f54d3..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-09-04_16:18:34_034bc5d26ad7c0e284265d92d3da39d786138545/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-04 16:51:47,693][benchmark][INFO] - Configuring inference benchmark -[2023-09-04 16:51:47,694][benchmark][INFO] - + Setting seed(42) -[2023-09-04 16:51:48,134][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-04 16:51:48,135][backend][INFO] - Configuring pytorch backend -[2023-09-04 16:51:48,135][backend][INFO] - + Checking initial device isolation -[2023-09-04 16:51:48,135][backend][INFO] - + Checking contineous device isolation -[2023-09-04 16:51:48,135][pytorch][INFO] - + Disabling gradients -[2023-09-04 16:51:48,136][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-04 16:51:48,254][pytorch][INFO] - + Turning on eval mode -[2023-09-04 16:51:48,255][inference][INFO] - Running inference benchmark -[2023-09-04 16:51:48,381][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 16:51:48,382][inference][INFO] - + Tracking forward pass peak memory -[2023-09-04 16:51:48,429][inference][INFO] - + Forward pass peak memory: 469.630976 (MB) -[2023-09-04 16:51:48,430][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 16:51:48,432][inference][INFO] - + Warming up the forward pass -[2023-09-04 16:51:48,490][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-04 16:51:53,517][inference][INFO] - + Forward pass latency: 5.07e-03 (s) -[2023-09-04 16:51:53,518][inference][INFO] - + Forward pass throughput: 789.00 (samples/s) -[2023-09-04 16:51:53,518][inference][INFO] - Saving inference results -[2023-09-04 16:51:53,525][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-04_16:18:34_034bc5d26ad7c0e284265d92d3da39d786138545/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-04_16:18:34_034bc5d26ad7c0e284265d92d3da39d786138545/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:18:34_034bc5d26ad7c0e284265d92d3da39d786138545/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: 
hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_16:18:34_034bc5d26ad7c0e284265d92d3da39d786138545/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-04_16:18:34_034bc5d26ad7c0e284265d92d3da39d786138545/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 6e09b6e4a7503fdb809df3d6148fcf1cc95fd86d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:18:34_034bc5d26ad7c0e284265d92d3da39d786138545/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-04_16:18:34_034bc5d26ad7c0e284265d92d3da39d786138545/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-04_16:18:34_034bc5d26ad7c0e284265d92d3da39d786138545/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-09-04_16:18:34_034bc5d26ad7c0e284265d92d3da39d786138545/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:18:34_034bc5d26ad7c0e284265d92d3da39d786138545/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-04_16:18:34_034bc5d26ad7c0e284265d92d3da39d786138545/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-04_16:18:34_034bc5d26ad7c0e284265d92d3da39d786138545/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:18:34_034bc5d26ad7c0e284265d92d3da39d786138545/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_16:18:34_034bc5d26ad7c0e284265d92d3da39d786138545/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-04_16:18:34_034bc5d26ad7c0e284265d92d3da39d786138545/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index e077afed5b16304768da608981bba3196ca10396..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:18:34_034bc5d26ad7c0e284265d92d3da39d786138545/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.38111999999995,0.00387,258.0,0.512,195.0 diff --git a/raw_results/2023-09-04_16:18:34_034bc5d26ad7c0e284265d92d3da39d786138545/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-04_16:18:34_034bc5d26ad7c0e284265d92d3da39d786138545/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 60b8416cb0bc7fb55b58f68e35002b954b704844..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:18:34_034bc5d26ad7c0e284265d92d3da39d786138545/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-04 16:51:58,320][benchmark][INFO] - Configuring inference benchmark -[2023-09-04 16:51:58,321][benchmark][INFO] - + Setting seed(42) -[2023-09-04 16:52:00,241][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-04 16:52:00,242][backend][INFO] - Configuring pytorch backend -[2023-09-04 16:52:00,242][backend][INFO] - + Checking initial device isolation -[2023-09-04 16:52:00,242][backend][INFO] - + Checking contineous device isolation -[2023-09-04 16:52:00,243][pytorch][INFO] - + Disabling gradients -[2023-09-04 16:52:00,243][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-04 16:52:00,886][pytorch][INFO] - + Turning on eval mode -[2023-09-04 16:52:00,887][inference][INFO] - Running inference benchmark -[2023-09-04 16:52:01,084][inference][INFO] - + Tracking forward pass peak memory -[2023-09-04 16:52:01,132][inference][INFO] - + Forward pass peak memory: 469.38111999999995 (MB) -[2023-09-04 16:52:01,133][inference][INFO] - + Warming up the 
forward pass -[2023-09-04 16:52:01,165][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-04 16:52:06,211][inference][INFO] - + Forward pass latency: 3.87e-03 (s) -[2023-09-04 16:52:06,212][inference][INFO] - + Forward pass throughput: 258.00 (samples/s) -[2023-09-04 16:52:06,213][inference][INFO] - + Warming up the generation pass -[2023-09-04 16:52:06,809][inference][INFO] - + Tracking generation latency and throughput -[2023-09-04 16:52:11,928][inference][INFO] - + Generation pass latency: 5.12e-01 (s) -[2023-09-04 16:52:11,929][inference][INFO] - + Generation pass throughput: 195.00 (tokens/s) -[2023-09-04 16:52:11,929][inference][INFO] - Saving inference results -[2023-09-04 16:52:11,942][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-04_16:53:50_3a479672ea95b058b621dcdcd1d15b73f36dc25a/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-04_16:53:50_3a479672ea95b058b621dcdcd1d15b73f36dc25a/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:53:50_3a479672ea95b058b621dcdcd1d15b73f36dc25a/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_16:53:50_3a479672ea95b058b621dcdcd1d15b73f36dc25a/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-04_16:53:50_3a479672ea95b058b621dcdcd1d15b73f36dc25a/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index f563ae5222aca0cd15d6da5e5eccdb4b378559f4..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:53:50_3a479672ea95b058b621dcdcd1d15b73f36dc25a/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-04_16:53:50_3a479672ea95b058b621dcdcd1d15b73f36dc25a/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-09-04_16:53:50_3a479672ea95b058b621dcdcd1d15b73f36dc25a/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-04_16:53:50_3a479672ea95b058b621dcdcd1d15b73f36dc25a/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:53:50_3a479672ea95b058b621dcdcd1d15b73f36dc25a/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-04_16:53:50_3a479672ea95b058b621dcdcd1d15b73f36dc25a/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-04_16:53:50_3a479672ea95b058b621dcdcd1d15b73f36dc25a/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:53:50_3a479672ea95b058b621dcdcd1d15b73f36dc25a/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_16:53:50_3a479672ea95b058b621dcdcd1d15b73f36dc25a/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-04_16:53:50_3a479672ea95b058b621dcdcd1d15b73f36dc25a/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 02537241820032cdd0cfbf8c7351c659db700982..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:53:50_3a479672ea95b058b621dcdcd1d15b73f36dc25a/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.959232,0.00622,161.0 diff --git a/raw_results/2023-09-04_16:53:50_3a479672ea95b058b621dcdcd1d15b73f36dc25a/pytorch_bert_inference/0/main.log b/raw_results/2023-09-04_16:53:50_3a479672ea95b058b621dcdcd1d15b73f36dc25a/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
9237aa508b8c7113c6bb9ed0477f81e8193fab5a..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:53:50_3a479672ea95b058b621dcdcd1d15b73f36dc25a/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-04 18:50:12,935][benchmark][INFO] - Configuring inference benchmark -[2023-09-04 18:50:12,937][benchmark][INFO] - + Setting seed(42) -[2023-09-04 18:50:14,143][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-04 18:50:14,144][backend][INFO] - Configuring pytorch backend -[2023-09-04 18:50:14,144][backend][INFO] - + Checking initial device isolation -[2023-09-04 18:50:14,144][backend][INFO] - + Checking contineous device isolation -[2023-09-04 18:50:14,144][pytorch][INFO] - + Disabling gradients -[2023-09-04 18:50:14,145][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-04 18:50:14,757][pytorch][INFO] - + Turning on eval mode -[2023-09-04 18:50:14,758][inference][INFO] - Running inference benchmark -[2023-09-04 18:50:14,884][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 18:50:14,886][inference][INFO] - + Tracking forward pass peak memory -[2023-09-04 18:50:14,954][inference][INFO] - + Forward pass peak memory: 468.959232 (MB) -[2023-09-04 18:50:14,955][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 18:50:14,957][inference][INFO] - + Warming up the forward pass -[2023-09-04 18:50:15,031][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-04 18:50:20,057][inference][INFO] - + Forward pass latency: 6.22e-03 (s) -[2023-09-04 18:50:20,058][inference][INFO] - + Forward pass throughput: 161.00 (samples/s) -[2023-09-04 18:50:20,058][inference][INFO] - Saving inference results -[2023-09-04 18:50:20,069][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-04_16:53:50_3a479672ea95b058b621dcdcd1d15b73f36dc25a/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-04_16:53:50_3a479672ea95b058b621dcdcd1d15b73f36dc25a/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:53:50_3a479672ea95b058b621dcdcd1d15b73f36dc25a/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_16:53:50_3a479672ea95b058b621dcdcd1d15b73f36dc25a/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-04_16:53:50_3a479672ea95b058b621dcdcd1d15b73f36dc25a/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 560460cce08acaf810d205f8bf41e3462ba87764..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:53:50_3a479672ea95b058b621dcdcd1d15b73f36dc25a/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-04_16:53:50_3a479672ea95b058b621dcdcd1d15b73f36dc25a/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-04_16:53:50_3a479672ea95b058b621dcdcd1d15b73f36dc25a/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-09-04_16:53:50_3a479672ea95b058b621dcdcd1d15b73f36dc25a/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:53:50_3a479672ea95b058b621dcdcd1d15b73f36dc25a/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-04_16:53:50_3a479672ea95b058b621dcdcd1d15b73f36dc25a/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-04_16:53:50_3a479672ea95b058b621dcdcd1d15b73f36dc25a/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:53:50_3a479672ea95b058b621dcdcd1d15b73f36dc25a/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_16:53:50_3a479672ea95b058b621dcdcd1d15b73f36dc25a/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-04_16:53:50_3a479672ea95b058b621dcdcd1d15b73f36dc25a/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 816c94392b021d1218ddb82dbca92c8253a64780..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:53:50_3a479672ea95b058b621dcdcd1d15b73f36dc25a/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,469.884928,0.0051,784.0 diff --git a/raw_results/2023-09-04_16:53:50_3a479672ea95b058b621dcdcd1d15b73f36dc25a/pytorch_bert_inference/1/main.log b/raw_results/2023-09-04_16:53:50_3a479672ea95b058b621dcdcd1d15b73f36dc25a/pytorch_bert_inference/1/main.log deleted file mode 100644 index 33313c250ddf7bbbe72a9c2790dcaae42c79d379..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:53:50_3a479672ea95b058b621dcdcd1d15b73f36dc25a/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-04 18:50:20,439][benchmark][INFO] - Configuring inference benchmark -[2023-09-04 18:50:20,440][benchmark][INFO] - + Setting seed(42) -[2023-09-04 18:50:20,882][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-04 18:50:20,882][backend][INFO] - Configuring pytorch backend -[2023-09-04 18:50:20,882][backend][INFO] - + Checking initial device isolation -[2023-09-04 18:50:20,882][backend][INFO] - + Checking contineous device isolation -[2023-09-04 18:50:20,883][pytorch][INFO] - + Disabling gradients -[2023-09-04 18:50:20,883][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-04 18:50:20,997][pytorch][INFO] - + Turning on eval mode -[2023-09-04 18:50:20,997][inference][INFO] - Running inference benchmark -[2023-09-04 18:50:21,125][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 18:50:21,126][inference][INFO] - + Tracking forward pass peak 
memory -[2023-09-04 18:50:21,176][inference][INFO] - + Forward pass peak memory: 469.884928 (MB) -[2023-09-04 18:50:21,177][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 18:50:21,179][inference][INFO] - + Warming up the forward pass -[2023-09-04 18:50:21,258][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-04 18:50:26,288][inference][INFO] - + Forward pass latency: 5.10e-03 (s) -[2023-09-04 18:50:26,289][inference][INFO] - + Forward pass throughput: 784.00 (samples/s) -[2023-09-04 18:50:26,289][inference][INFO] - Saving inference results -[2023-09-04 18:50:26,295][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-04_16:53:50_3a479672ea95b058b621dcdcd1d15b73f36dc25a/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-04_16:53:50_3a479672ea95b058b621dcdcd1d15b73f36dc25a/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:53:50_3a479672ea95b058b621dcdcd1d15b73f36dc25a/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_16:53:50_3a479672ea95b058b621dcdcd1d15b73f36dc25a/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-04_16:53:50_3a479672ea95b058b621dcdcd1d15b73f36dc25a/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 04b8456d9ce672ec06e6446e12347e35976bc84e..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:53:50_3a479672ea95b058b621dcdcd1d15b73f36dc25a/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-04_16:53:50_3a479672ea95b058b621dcdcd1d15b73f36dc25a/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-04_16:53:50_3a479672ea95b058b621dcdcd1d15b73f36dc25a/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-09-04_16:53:50_3a479672ea95b058b621dcdcd1d15b73f36dc25a/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:53:50_3a479672ea95b058b621dcdcd1d15b73f36dc25a/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-04_16:53:50_3a479672ea95b058b621dcdcd1d15b73f36dc25a/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-04_16:53:50_3a479672ea95b058b621dcdcd1d15b73f36dc25a/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:53:50_3a479672ea95b058b621dcdcd1d15b73f36dc25a/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_16:53:50_3a479672ea95b058b621dcdcd1d15b73f36dc25a/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-04_16:53:50_3a479672ea95b058b621dcdcd1d15b73f36dc25a/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 13f60a5ab36fed2ba96a12e98a79282da07fbff0..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:53:50_3a479672ea95b058b621dcdcd1d15b73f36dc25a/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.54905599999995,0.00402,249.0,0.537,186.0 diff --git a/raw_results/2023-09-04_16:53:50_3a479672ea95b058b621dcdcd1d15b73f36dc25a/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-04_16:53:50_3a479672ea95b058b621dcdcd1d15b73f36dc25a/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index ffd4df15673437828db7309f91a5bae18dfa7f41..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-09-04_16:53:50_3a479672ea95b058b621dcdcd1d15b73f36dc25a/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-04 18:50:31,062][benchmark][INFO] - Configuring inference benchmark -[2023-09-04 18:50:31,063][benchmark][INFO] - + Setting seed(42) -[2023-09-04 18:50:32,502][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-04 18:50:32,502][backend][INFO] - Configuring pytorch backend -[2023-09-04 18:50:32,503][backend][INFO] - + Checking initial device isolation -[2023-09-04 18:50:32,503][backend][INFO] - + Checking contineous device isolation -[2023-09-04 18:50:32,503][pytorch][INFO] - + Disabling gradients -[2023-09-04 18:50:32,503][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-04 18:50:33,147][pytorch][INFO] - + Turning on eval mode -[2023-09-04 18:50:33,148][inference][INFO] - Running inference benchmark -[2023-09-04 18:50:33,351][inference][INFO] - + Tracking forward pass peak memory -[2023-09-04 18:50:33,401][inference][INFO] - + Forward pass peak memory: 469.54905599999995 (MB) -[2023-09-04 18:50:33,402][inference][INFO] - + Warming up the forward pass -[2023-09-04 18:50:33,440][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-04 18:50:38,482][inference][INFO] - + Forward pass latency: 4.02e-03 (s) -[2023-09-04 18:50:38,483][inference][INFO] - + Forward pass throughput: 249.00 (samples/s) -[2023-09-04 18:50:38,484][inference][INFO] - + Warming up the generation pass -[2023-09-04 18:50:39,080][inference][INFO] - + Tracking generation latency and throughput -[2023-09-04 18:50:44,448][inference][INFO] - + Generation pass latency: 5.37e-01 (s) -[2023-09-04 18:50:44,449][inference][INFO] - + Generation pass throughput: 186.00 (tokens/s) -[2023-09-04 18:50:44,449][inference][INFO] - Saving inference results -[2023-09-04 18:50:44,464][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-04_16:54:34_44d2c199f6c5fcc93b662d6d3aaa02f8e79c9c4b/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-04_16:54:34_44d2c199f6c5fcc93b662d6d3aaa02f8e79c9c4b/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:54:34_44d2c199f6c5fcc93b662d6d3aaa02f8e79c9c4b/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_16:54:34_44d2c199f6c5fcc93b662d6d3aaa02f8e79c9c4b/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-04_16:54:34_44d2c199f6c5fcc93b662d6d3aaa02f8e79c9c4b/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 58e4aeeb8dbd5df55930f3a6818f7a1829fd1b6f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:54:34_44d2c199f6c5fcc93b662d6d3aaa02f8e79c9c4b/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
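A note on the ${is_inference:${benchmark.name}} values in the config.yaml files above: these are unresolved OmegaConf interpolations, which is why the corresponding hydra_config.yaml files record the already-resolved literals disable_grad: true and eval_mode: true. The sketch below shows how such a custom resolver could behave; registering it this way is an assumption about how optimum_benchmark wires it up, not code copied from that project.

from omegaconf import OmegaConf

# Hypothetical registration of the is_inference resolver referenced by the
# config; optimum_benchmark presumably registers something equivalent.
OmegaConf.register_new_resolver("is_inference", lambda name: name == "inference")

cfg = OmegaConf.create({
    "benchmark": {"name": "inference"},
    "backend": {"disable_grad": "${is_inference:${benchmark.name}}"},
})

# Resolving the interpolation chain yields the literal seen in hydra_config.yaml.
print(OmegaConf.to_container(cfg, resolve=True)["backend"]["disable_grad"])  # True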
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-04_16:54:34_44d2c199f6c5fcc93b662d6d3aaa02f8e79c9c4b/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-04_16:54:34_44d2c199f6c5fcc93b662d6d3aaa02f8e79c9c4b/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-04_16:54:34_44d2c199f6c5fcc93b662d6d3aaa02f8e79c9c4b/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:54:34_44d2c199f6c5fcc93b662d6d3aaa02f8e79c9c4b/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-04_16:54:34_44d2c199f6c5fcc93b662d6d3aaa02f8e79c9c4b/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-04_16:54:34_44d2c199f6c5fcc93b662d6d3aaa02f8e79c9c4b/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:54:34_44d2c199f6c5fcc93b662d6d3aaa02f8e79c9c4b/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_16:54:34_44d2c199f6c5fcc93b662d6d3aaa02f8e79c9c4b/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-04_16:54:34_44d2c199f6c5fcc93b662d6d3aaa02f8e79c9c4b/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index feaf5a5e0d86d1dc5c29d78709e64431506420df..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:54:34_44d2c199f6c5fcc93b662d6d3aaa02f8e79c9c4b/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.18156799999997,0.00326,307.0 diff --git a/raw_results/2023-09-04_16:54:34_44d2c199f6c5fcc93b662d6d3aaa02f8e79c9c4b/pytorch_bert_inference/0/main.log b/raw_results/2023-09-04_16:54:34_44d2c199f6c5fcc93b662d6d3aaa02f8e79c9c4b/pytorch_bert_inference/0/main.log deleted file mode 100644 index 21f75cc95887e5782b992cb1013d4c2a4850dbaa..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:54:34_44d2c199f6c5fcc93b662d6d3aaa02f8e79c9c4b/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-04 18:51:53,568][benchmark][INFO] - Configuring inference benchmark -[2023-09-04 18:51:53,570][benchmark][INFO] - + Setting seed(42) -[2023-09-04 18:51:54,843][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-04 18:51:54,843][backend][INFO] - Configuring pytorch backend -[2023-09-04 18:51:54,843][backend][INFO] - + Checking initial device isolation -[2023-09-04 18:51:54,844][backend][INFO] - + Checking contineous device isolation -[2023-09-04 18:51:54,844][pytorch][INFO] - + Disabling gradients -[2023-09-04 18:51:54,844][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-04 18:51:55,468][pytorch][INFO] - + Turning on eval mode -[2023-09-04 18:51:55,469][inference][INFO] - Running inference benchmark -[2023-09-04 18:51:55,594][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 18:51:55,595][inference][INFO] - + Tracking forward 
pass peak memory -[2023-09-04 18:51:55,657][inference][INFO] - + Forward pass peak memory: 467.18156799999997 (MB) -[2023-09-04 18:51:55,658][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 18:51:55,659][inference][INFO] - + Warming up the forward pass -[2023-09-04 18:51:55,696][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-04 18:52:00,745][inference][INFO] - + Forward pass latency: 3.26e-03 (s) -[2023-09-04 18:52:00,747][inference][INFO] - + Forward pass throughput: 307.00 (samples/s) -[2023-09-04 18:52:00,747][inference][INFO] - Saving inference results -[2023-09-04 18:52:00,760][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-04_16:54:34_44d2c199f6c5fcc93b662d6d3aaa02f8e79c9c4b/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-04_16:54:34_44d2c199f6c5fcc93b662d6d3aaa02f8e79c9c4b/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:54:34_44d2c199f6c5fcc93b662d6d3aaa02f8e79c9c4b/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_16:54:34_44d2c199f6c5fcc93b662d6d3aaa02f8e79c9c4b/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-04_16:54:34_44d2c199f6c5fcc93b662d6d3aaa02f8e79c9c4b/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 927e504afb18f5010ee14b1babef573332f21c72..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:54:34_44d2c199f6c5fcc93b662d6d3aaa02f8e79c9c4b/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} 
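The basic sweeper's params entry in these hydra.yaml files (benchmark.input_shapes.batch_size: 1,4) is what produces the paired job directories 0 and 1 for each bert experiment: Hydra expands the comma-separated override into one run per value. Below is a minimal sketch of composing a single sweep point with Hydra's compose API; the relative config_path of "configs" mirrors the command-line config source listed in hydra.yaml and is an assumption about the local checkout layout.

from hydra import compose, initialize

# Compose the batch_size=4 point of the sweep (job dir 1) without launching
# a full multirun; config_name matches the one recorded in hydra.yaml.
with initialize(version_base="1.3", config_path="configs"):
    cfg = compose(
        config_name="bert_cpu_inference",
        overrides=["benchmark.input_shapes.batch_size=4"],
    )
    print(cfg.benchmark.input_shapes.batch_size)  # 4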
- launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-04_16:54:34_44d2c199f6c5fcc93b662d6d3aaa02f8e79c9c4b/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-09-04_16:54:34_44d2c199f6c5fcc93b662d6d3aaa02f8e79c9c4b/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-09-04_16:54:34_44d2c199f6c5fcc93b662d6d3aaa02f8e79c9c4b/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:54:34_44d2c199f6c5fcc93b662d6d3aaa02f8e79c9c4b/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-04_16:54:34_44d2c199f6c5fcc93b662d6d3aaa02f8e79c9c4b/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-04_16:54:34_44d2c199f6c5fcc93b662d6d3aaa02f8e79c9c4b/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:54:34_44d2c199f6c5fcc93b662d6d3aaa02f8e79c9c4b/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_16:54:34_44d2c199f6c5fcc93b662d6d3aaa02f8e79c9c4b/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-04_16:54:34_44d2c199f6c5fcc93b662d6d3aaa02f8e79c9c4b/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 83010e1124c8c59f08211994ab483267ade16ed4..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:54:34_44d2c199f6c5fcc93b662d6d3aaa02f8e79c9c4b/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.11136,0.00354,1130.0 diff --git a/raw_results/2023-09-04_16:54:34_44d2c199f6c5fcc93b662d6d3aaa02f8e79c9c4b/pytorch_bert_inference/1/main.log b/raw_results/2023-09-04_16:54:34_44d2c199f6c5fcc93b662d6d3aaa02f8e79c9c4b/pytorch_bert_inference/1/main.log deleted file mode 100644 index 
848c79603591b15dac45f532fac12cd6029a21dd..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:54:34_44d2c199f6c5fcc93b662d6d3aaa02f8e79c9c4b/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-04 18:52:01,165][benchmark][INFO] - Configuring inference benchmark -[2023-09-04 18:52:01,166][benchmark][INFO] - + Setting seed(42) -[2023-09-04 18:52:01,621][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-04 18:52:01,622][backend][INFO] - Configuring pytorch backend -[2023-09-04 18:52:01,622][backend][INFO] - + Checking initial device isolation -[2023-09-04 18:52:01,622][backend][INFO] - + Checking contineous device isolation -[2023-09-04 18:52:01,622][pytorch][INFO] - + Disabling gradients -[2023-09-04 18:52:01,622][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-04 18:52:01,736][pytorch][INFO] - + Turning on eval mode -[2023-09-04 18:52:01,736][inference][INFO] - Running inference benchmark -[2023-09-04 18:52:01,863][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 18:52:01,864][inference][INFO] - + Tracking forward pass peak memory -[2023-09-04 18:52:01,908][inference][INFO] - + Forward pass peak memory: 468.11136 (MB) -[2023-09-04 18:52:01,909][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 18:52:01,910][inference][INFO] - + Warming up the forward pass -[2023-09-04 18:52:01,947][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-04 18:52:06,992][inference][INFO] - + Forward pass latency: 3.54e-03 (s) -[2023-09-04 18:52:06,993][inference][INFO] - + Forward pass throughput: 1130.00 (samples/s) -[2023-09-04 18:52:06,993][inference][INFO] - Saving inference results -[2023-09-04 18:52:07,000][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-04_16:54:34_44d2c199f6c5fcc93b662d6d3aaa02f8e79c9c4b/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-04_16:54:34_44d2c199f6c5fcc93b662d6d3aaa02f8e79c9c4b/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:54:34_44d2c199f6c5fcc93b662d6d3aaa02f8e79c9c4b/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_16:54:34_44d2c199f6c5fcc93b662d6d3aaa02f8e79c9c4b/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-04_16:54:34_44d2c199f6c5fcc93b662d6d3aaa02f8e79c9c4b/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 4fd0bcff73fe32f32b9bafd412f908f1691e9d2a..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:54:34_44d2c199f6c5fcc93b662d6d3aaa02f8e79c9c4b/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-04_16:54:34_44d2c199f6c5fcc93b662d6d3aaa02f8e79c9c4b/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-04_16:54:34_44d2c199f6c5fcc93b662d6d3aaa02f8e79c9c4b/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-09-04_16:54:34_44d2c199f6c5fcc93b662d6d3aaa02f8e79c9c4b/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:54:34_44d2c199f6c5fcc93b662d6d3aaa02f8e79c9c4b/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-04_16:54:34_44d2c199f6c5fcc93b662d6d3aaa02f8e79c9c4b/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-04_16:54:34_44d2c199f6c5fcc93b662d6d3aaa02f8e79c9c4b/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:54:34_44d2c199f6c5fcc93b662d6d3aaa02f8e79c9c4b/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_16:54:34_44d2c199f6c5fcc93b662d6d3aaa02f8e79c9c4b/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-04_16:54:34_44d2c199f6c5fcc93b662d6d3aaa02f8e79c9c4b/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 36c046542ad7ed6a3faf37e7c1a3816a1371a81a..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:54:34_44d2c199f6c5fcc93b662d6d3aaa02f8e79c9c4b/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.434368,0.00389,257.0,0.483,207.0 diff --git a/raw_results/2023-09-04_16:54:34_44d2c199f6c5fcc93b662d6d3aaa02f8e79c9c4b/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-04_16:54:34_44d2c199f6c5fcc93b662d6d3aaa02f8e79c9c4b/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 344b9b71c8e8b86f041d44b361558b790602c473..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_16:54:34_44d2c199f6c5fcc93b662d6d3aaa02f8e79c9c4b/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-04 18:52:11,906][benchmark][INFO] - Configuring inference benchmark -[2023-09-04 18:52:11,907][benchmark][INFO] - + Setting seed(42) -[2023-09-04 18:52:13,325][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-04 18:52:13,326][backend][INFO] - Configuring pytorch backend -[2023-09-04 18:52:13,326][backend][INFO] - + Checking initial device isolation -[2023-09-04 18:52:13,326][backend][INFO] - + Checking contineous device isolation -[2023-09-04 18:52:13,326][pytorch][INFO] - + Disabling gradients -[2023-09-04 18:52:13,326][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-04 18:52:13,967][pytorch][INFO] - + Turning on eval mode -[2023-09-04 18:52:13,967][inference][INFO] - Running inference benchmark -[2023-09-04 18:52:14,173][inference][INFO] - + Tracking forward pass peak memory -[2023-09-04 18:52:14,221][inference][INFO] - + Forward pass peak memory: 469.434368 (MB) -[2023-09-04 18:52:14,222][inference][INFO] - + Warming up the forward pass 
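The throughput columns in these CSVs are consistent with throughput being derived from the measured latency: roughly batch_size / latency for the forward pass and new_tokens * batch_size / latency for generation. A quick check against the gpt2 results above (batch_size 1, new_tokens 100); this is an inference from the recorded numbers, not optimum_benchmark's own code:

# Values copied from the gpt2 inference_results.csv and input_shapes above.
batch_size, new_tokens = 1, 100
forward_latency, generate_latency = 0.00389, 0.483

print(round(batch_size / forward_latency))                # 257 samples/s, matches the CSV
print(round(new_tokens * batch_size / generate_latency))  # 207 tokens/s, matches the CSV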
-[2023-09-04 18:52:14,260][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-04 18:52:19,305][inference][INFO] - + Forward pass latency: 3.89e-03 (s) -[2023-09-04 18:52:19,307][inference][INFO] - + Forward pass throughput: 257.00 (samples/s) -[2023-09-04 18:52:19,308][inference][INFO] - + Warming up the generation pass -[2023-09-04 18:52:19,805][inference][INFO] - + Tracking generation latency and throughput -[2023-09-04 18:52:25,123][inference][INFO] - + Generation pass latency: 4.83e-01 (s) -[2023-09-04 18:52:25,125][inference][INFO] - + Generation pass throughput: 207.00 (tokens/s) -[2023-09-04 18:52:25,125][inference][INFO] - Saving inference results -[2023-09-04 18:52:25,138][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-04_17:16:10_040c4613c2fac59f16e333a630d9a69b6ff9ca5d/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-04_17:16:10_040c4613c2fac59f16e333a630d9a69b6ff9ca5d/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_17:16:10_040c4613c2fac59f16e333a630d9a69b6ff9ca5d/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_17:16:10_040c4613c2fac59f16e333a630d9a69b6ff9ca5d/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-04_17:16:10_040c4613c2fac59f16e333a630d9a69b6ff9ca5d/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index f9e35078dd2cde79b8e6037969b67e694a87d207..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_17:16:10_040c4613c2fac59f16e333a630d9a69b6ff9ca5d/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-04_17:16:10_040c4613c2fac59f16e333a630d9a69b6ff9ca5d/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-09-04_17:16:10_040c4613c2fac59f16e333a630d9a69b6ff9ca5d/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-04_17:16:10_040c4613c2fac59f16e333a630d9a69b6ff9ca5d/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_17:16:10_040c4613c2fac59f16e333a630d9a69b6ff9ca5d/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-04_17:16:10_040c4613c2fac59f16e333a630d9a69b6ff9ca5d/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-04_17:16:10_040c4613c2fac59f16e333a630d9a69b6ff9ca5d/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_17:16:10_040c4613c2fac59f16e333a630d9a69b6ff9ca5d/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_17:16:10_040c4613c2fac59f16e333a630d9a69b6ff9ca5d/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-04_17:16:10_040c4613c2fac59f16e333a630d9a69b6ff9ca5d/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 82f714ee939b7d8e77a613542b95e896d366845f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_17:16:10_040c4613c2fac59f16e333a630d9a69b6ff9ca5d/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.68947199999997,0.00357,280.0 diff --git a/raw_results/2023-09-04_17:16:10_040c4613c2fac59f16e333a630d9a69b6ff9ca5d/pytorch_bert_inference/0/main.log b/raw_results/2023-09-04_17:16:10_040c4613c2fac59f16e333a630d9a69b6ff9ca5d/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
946c9b9d7a4a0307f1ef359f7923129e9fd30f60..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_17:16:10_040c4613c2fac59f16e333a630d9a69b6ff9ca5d/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-04 18:53:33,488][benchmark][INFO] - Configuring inference benchmark -[2023-09-04 18:53:33,489][benchmark][INFO] - + Setting seed(42) -[2023-09-04 18:53:34,725][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-04 18:53:34,725][backend][INFO] - Configuring pytorch backend -[2023-09-04 18:53:34,725][backend][INFO] - + Checking initial device isolation -[2023-09-04 18:53:34,726][backend][INFO] - + Checking contineous device isolation -[2023-09-04 18:53:34,726][pytorch][INFO] - + Disabling gradients -[2023-09-04 18:53:34,726][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-04 18:53:35,357][pytorch][INFO] - + Turning on eval mode -[2023-09-04 18:53:35,358][inference][INFO] - Running inference benchmark -[2023-09-04 18:53:35,486][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 18:53:35,487][inference][INFO] - + Tracking forward pass peak memory -[2023-09-04 18:53:35,550][inference][INFO] - + Forward pass peak memory: 467.68947199999997 (MB) -[2023-09-04 18:53:35,551][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 18:53:35,553][inference][INFO] - + Warming up the forward pass -[2023-09-04 18:53:35,585][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-04 18:53:40,630][inference][INFO] - + Forward pass latency: 3.57e-03 (s) -[2023-09-04 18:53:40,632][inference][INFO] - + Forward pass throughput: 280.00 (samples/s) -[2023-09-04 18:53:40,632][inference][INFO] - Saving inference results -[2023-09-04 18:53:40,643][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-04_17:16:10_040c4613c2fac59f16e333a630d9a69b6ff9ca5d/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-04_17:16:10_040c4613c2fac59f16e333a630d9a69b6ff9ca5d/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_17:16:10_040c4613c2fac59f16e333a630d9a69b6ff9ca5d/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_17:16:10_040c4613c2fac59f16e333a630d9a69b6ff9ca5d/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-04_17:16:10_040c4613c2fac59f16e333a630d9a69b6ff9ca5d/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 4e9c1101a3eb004064a3a27a02b94a6492542daf..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_17:16:10_040c4613c2fac59f16e333a630d9a69b6ff9ca5d/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-04_17:16:10_040c4613c2fac59f16e333a630d9a69b6ff9ca5d/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-04_17:16:10_040c4613c2fac59f16e333a630d9a69b6ff9ca5d/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-09-04_17:16:10_040c4613c2fac59f16e333a630d9a69b6ff9ca5d/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_17:16:10_040c4613c2fac59f16e333a630d9a69b6ff9ca5d/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-04_17:16:10_040c4613c2fac59f16e333a630d9a69b6ff9ca5d/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-04_17:16:10_040c4613c2fac59f16e333a630d9a69b6ff9ca5d/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_17:16:10_040c4613c2fac59f16e333a630d9a69b6ff9ca5d/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_17:16:10_040c4613c2fac59f16e333a630d9a69b6ff9ca5d/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-04_17:16:10_040c4613c2fac59f16e333a630d9a69b6ff9ca5d/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 69cf6354cafba38e38d4577ea803a914fc18de85..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_17:16:10_040c4613c2fac59f16e333a630d9a69b6ff9ca5d/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.64793599999996,0.00342,1170.0 diff --git a/raw_results/2023-09-04_17:16:10_040c4613c2fac59f16e333a630d9a69b6ff9ca5d/pytorch_bert_inference/1/main.log b/raw_results/2023-09-04_17:16:10_040c4613c2fac59f16e333a630d9a69b6ff9ca5d/pytorch_bert_inference/1/main.log deleted file mode 100644 index 0a6eee3537d28b24f0402bac6fa936d4ff586bad..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_17:16:10_040c4613c2fac59f16e333a630d9a69b6ff9ca5d/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-04 18:53:41,020][benchmark][INFO] - Configuring inference benchmark -[2023-09-04 18:53:41,022][benchmark][INFO] - + Setting seed(42) -[2023-09-04 18:53:41,465][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-04 18:53:41,465][backend][INFO] - Configuring pytorch backend -[2023-09-04 18:53:41,466][backend][INFO] - + Checking initial device isolation -[2023-09-04 18:53:41,466][backend][INFO] - + Checking contineous device isolation -[2023-09-04 18:53:41,466][pytorch][INFO] - + Disabling gradients -[2023-09-04 18:53:41,466][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-04 18:53:41,584][pytorch][INFO] - + Turning on eval mode -[2023-09-04 18:53:41,584][inference][INFO] - Running inference benchmark -[2023-09-04 18:53:41,709][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 18:53:41,710][inference][INFO] - + Tracking forward 
pass peak memory -[2023-09-04 18:53:41,752][inference][INFO] - + Forward pass peak memory: 468.64793599999996 (MB) -[2023-09-04 18:53:41,754][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 18:53:41,755][inference][INFO] - + Warming up the forward pass -[2023-09-04 18:53:41,794][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-04 18:53:46,842][inference][INFO] - + Forward pass latency: 3.42e-03 (s) -[2023-09-04 18:53:46,843][inference][INFO] - + Forward pass throughput: 1170.00 (samples/s) -[2023-09-04 18:53:46,843][inference][INFO] - Saving inference results -[2023-09-04 18:53:46,853][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-04_17:16:10_040c4613c2fac59f16e333a630d9a69b6ff9ca5d/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-04_17:16:10_040c4613c2fac59f16e333a630d9a69b6ff9ca5d/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_17:16:10_040c4613c2fac59f16e333a630d9a69b6ff9ca5d/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_17:16:10_040c4613c2fac59f16e333a630d9a69b6ff9ca5d/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-04_17:16:10_040c4613c2fac59f16e333a630d9a69b6ff9ca5d/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index e8f87a383ab6fd4f449db8858de82e7d18729611..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_17:16:10_040c4613c2fac59f16e333a630d9a69b6ff9ca5d/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - 
launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-04_17:16:10_040c4613c2fac59f16e333a630d9a69b6ff9ca5d/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-04_17:16:10_040c4613c2fac59f16e333a630d9a69b6ff9ca5d/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-09-04_17:16:10_040c4613c2fac59f16e333a630d9a69b6ff9ca5d/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_17:16:10_040c4613c2fac59f16e333a630d9a69b6ff9ca5d/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-04_17:16:10_040c4613c2fac59f16e333a630d9a69b6ff9ca5d/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-04_17:16:10_040c4613c2fac59f16e333a630d9a69b6ff9ca5d/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_17:16:10_040c4613c2fac59f16e333a630d9a69b6ff9ca5d/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_17:16:10_040c4613c2fac59f16e333a630d9a69b6ff9ca5d/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-04_17:16:10_040c4613c2fac59f16e333a630d9a69b6ff9ca5d/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index bd731e6c95b11b16469f1d3adcd2da2276b5336a..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_17:16:10_040c4613c2fac59f16e333a630d9a69b6ff9ca5d/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.56134399999996,0.00374,267.0,0.528,189.0 diff --git a/raw_results/2023-09-04_17:16:10_040c4613c2fac59f16e333a630d9a69b6ff9ca5d/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-04_17:16:10_040c4613c2fac59f16e333a630d9a69b6ff9ca5d/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 28b96a99cfa0ce49a9018c838ec133bde366b354..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-09-04_17:16:10_040c4613c2fac59f16e333a630d9a69b6ff9ca5d/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-04 18:53:51,782][benchmark][INFO] - Configuring inference benchmark -[2023-09-04 18:53:51,783][benchmark][INFO] - + Setting seed(42) -[2023-09-04 18:53:53,481][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-04 18:53:53,481][backend][INFO] - Configuring pytorch backend -[2023-09-04 18:53:53,481][backend][INFO] - + Checking initial device isolation -[2023-09-04 18:53:53,482][backend][INFO] - + Checking contineous device isolation -[2023-09-04 18:53:53,482][pytorch][INFO] - + Disabling gradients -[2023-09-04 18:53:53,482][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-04 18:53:54,149][pytorch][INFO] - + Turning on eval mode -[2023-09-04 18:53:54,149][inference][INFO] - Running inference benchmark -[2023-09-04 18:53:54,365][inference][INFO] - + Tracking forward pass peak memory -[2023-09-04 18:53:54,413][inference][INFO] - + Forward pass peak memory: 469.56134399999996 (MB) -[2023-09-04 18:53:54,414][inference][INFO] - + Warming up the forward pass -[2023-09-04 18:53:54,446][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-04 18:53:59,490][inference][INFO] - + Forward pass latency: 3.74e-03 (s) -[2023-09-04 18:53:59,492][inference][INFO] - + Forward pass throughput: 267.00 (samples/s) -[2023-09-04 18:53:59,493][inference][INFO] - + Warming up the generation pass -[2023-09-04 18:54:00,078][inference][INFO] - + Tracking generation latency and throughput -[2023-09-04 18:54:05,359][inference][INFO] - + Generation pass latency: 5.28e-01 (s) -[2023-09-04 18:54:05,360][inference][INFO] - + Generation pass throughput: 189.00 (tokens/s) -[2023-09-04 18:54:05,360][inference][INFO] - Saving inference results -[2023-09-04 18:54:05,373][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-04_18:17:09_22a69f1d7d520d5fbccbdb163d05db56bf79724c/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-04_18:17:09_22a69f1d7d520d5fbccbdb163d05db56bf79724c/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_18:17:09_22a69f1d7d520d5fbccbdb163d05db56bf79724c/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_18:17:09_22a69f1d7d520d5fbccbdb163d05db56bf79724c/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-04_18:17:09_22a69f1d7d520d5fbccbdb163d05db56bf79724c/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 44ecb346bf5120c0ad58f74929cff8904703d56d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_18:17:09_22a69f1d7d520d5fbccbdb163d05db56bf79724c/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-04_18:17:09_22a69f1d7d520d5fbccbdb163d05db56bf79724c/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-04_18:17:09_22a69f1d7d520d5fbccbdb163d05db56bf79724c/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-04_18:17:09_22a69f1d7d520d5fbccbdb163d05db56bf79724c/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_18:17:09_22a69f1d7d520d5fbccbdb163d05db56bf79724c/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-04_18:17:09_22a69f1d7d520d5fbccbdb163d05db56bf79724c/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-04_18:17:09_22a69f1d7d520d5fbccbdb163d05db56bf79724c/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_18:17:09_22a69f1d7d520d5fbccbdb163d05db56bf79724c/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_18:17:09_22a69f1d7d520d5fbccbdb163d05db56bf79724c/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-04_18:17:09_22a69f1d7d520d5fbccbdb163d05db56bf79724c/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 5f06acd287c4036ed209478962bc557c2c0d5c00..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_18:17:09_22a69f1d7d520d5fbccbdb163d05db56bf79724c/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.070976,0.00318,314.0 diff --git a/raw_results/2023-09-04_18:17:09_22a69f1d7d520d5fbccbdb163d05db56bf79724c/pytorch_bert_inference/0/main.log b/raw_results/2023-09-04_18:17:09_22a69f1d7d520d5fbccbdb163d05db56bf79724c/pytorch_bert_inference/0/main.log deleted file mode 100644 index a8ffa9b70388be170ed42a7adc46ea4338505147..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_18:17:09_22a69f1d7d520d5fbccbdb163d05db56bf79724c/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-04 18:55:13,948][benchmark][INFO] - Configuring inference benchmark -[2023-09-04 18:55:13,950][benchmark][INFO] - + Setting seed(42) -[2023-09-04 18:55:15,195][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-04 18:55:15,195][backend][INFO] - Configuring pytorch backend -[2023-09-04 18:55:15,195][backend][INFO] - + Checking initial device isolation -[2023-09-04 18:55:15,195][backend][INFO] - + Checking contineous device isolation -[2023-09-04 18:55:15,195][pytorch][INFO] - + Disabling gradients -[2023-09-04 18:55:15,196][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-04 18:55:15,820][pytorch][INFO] - + Turning on eval mode -[2023-09-04 18:55:15,820][inference][INFO] - Running inference benchmark -[2023-09-04 18:55:15,937][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 18:55:15,939][inference][INFO] - + Tracking forward pass peak 
memory -[2023-09-04 18:55:16,004][inference][INFO] - + Forward pass peak memory: 467.070976 (MB) -[2023-09-04 18:55:16,005][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 18:55:16,007][inference][INFO] - + Warming up the forward pass -[2023-09-04 18:55:16,040][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-04 18:55:21,091][inference][INFO] - + Forward pass latency: 3.18e-03 (s) -[2023-09-04 18:55:21,092][inference][INFO] - + Forward pass throughput: 314.00 (samples/s) -[2023-09-04 18:55:21,093][inference][INFO] - Saving inference results -[2023-09-04 18:55:21,103][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-04_18:17:09_22a69f1d7d520d5fbccbdb163d05db56bf79724c/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-04_18:17:09_22a69f1d7d520d5fbccbdb163d05db56bf79724c/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_18:17:09_22a69f1d7d520d5fbccbdb163d05db56bf79724c/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_18:17:09_22a69f1d7d520d5fbccbdb163d05db56bf79724c/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-04_18:17:09_22a69f1d7d520d5fbccbdb163d05db56bf79724c/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 586687781fb2332621db59eb4f1e318e26fb8b9e..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_18:17:09_22a69f1d7d520d5fbccbdb163d05db56bf79724c/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-04_18:17:09_22a69f1d7d520d5fbccbdb163d05db56bf79724c/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-04_18:17:09_22a69f1d7d520d5fbccbdb163d05db56bf79724c/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-09-04_18:17:09_22a69f1d7d520d5fbccbdb163d05db56bf79724c/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_18:17:09_22a69f1d7d520d5fbccbdb163d05db56bf79724c/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-04_18:17:09_22a69f1d7d520d5fbccbdb163d05db56bf79724c/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-04_18:17:09_22a69f1d7d520d5fbccbdb163d05db56bf79724c/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_18:17:09_22a69f1d7d520d5fbccbdb163d05db56bf79724c/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_18:17:09_22a69f1d7d520d5fbccbdb163d05db56bf79724c/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-04_18:17:09_22a69f1d7d520d5fbccbdb163d05db56bf79724c/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 61cce2d6b1ccae8bc32686040ef330765ad884a6..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_18:17:09_22a69f1d7d520d5fbccbdb163d05db56bf79724c/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.08678399999997,0.00359,1110.0 diff --git a/raw_results/2023-09-04_18:17:09_22a69f1d7d520d5fbccbdb163d05db56bf79724c/pytorch_bert_inference/1/main.log b/raw_results/2023-09-04_18:17:09_22a69f1d7d520d5fbccbdb163d05db56bf79724c/pytorch_bert_inference/1/main.log deleted file mode 100644 index 103292ed53285ea913a87000228fd4e0e0bb3f55..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-09-04_18:17:09_22a69f1d7d520d5fbccbdb163d05db56bf79724c/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-04 18:55:21,465][benchmark][INFO] - Configuring inference benchmark -[2023-09-04 18:55:21,466][benchmark][INFO] - + Setting seed(42) -[2023-09-04 18:55:21,911][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-04 18:55:21,912][backend][INFO] - Configuring pytorch backend -[2023-09-04 18:55:21,912][backend][INFO] - + Checking initial device isolation -[2023-09-04 18:55:21,912][backend][INFO] - + Checking contineous device isolation -[2023-09-04 18:55:21,912][pytorch][INFO] - + Disabling gradients -[2023-09-04 18:55:21,912][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-04 18:55:22,027][pytorch][INFO] - + Turning on eval mode -[2023-09-04 18:55:22,028][inference][INFO] - Running inference benchmark -[2023-09-04 18:55:22,160][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 18:55:22,162][inference][INFO] - + Tracking forward pass peak memory -[2023-09-04 18:55:22,209][inference][INFO] - + Forward pass peak memory: 468.08678399999997 (MB) -[2023-09-04 18:55:22,211][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 18:55:22,212][inference][INFO] - + Warming up the forward pass -[2023-09-04 18:55:22,249][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-04 18:55:27,295][inference][INFO] - + Forward pass latency: 3.59e-03 (s) -[2023-09-04 18:55:27,296][inference][INFO] - + Forward pass throughput: 1110.00 (samples/s) -[2023-09-04 18:55:27,296][inference][INFO] - Saving inference results -[2023-09-04 18:55:27,305][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-04_18:17:09_22a69f1d7d520d5fbccbdb163d05db56bf79724c/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-04_18:17:09_22a69f1d7d520d5fbccbdb163d05db56bf79724c/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_18:17:09_22a69f1d7d520d5fbccbdb163d05db56bf79724c/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference 
-model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_18:17:09_22a69f1d7d520d5fbccbdb163d05db56bf79724c/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-04_18:17:09_22a69f1d7d520d5fbccbdb163d05db56bf79724c/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index eedadcd671c9b268f0d9b8f98e16b8f59c9d06d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_18:17:09_22a69f1d7d520d5fbccbdb163d05db56bf79724c/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-04_18:17:09_22a69f1d7d520d5fbccbdb163d05db56bf79724c/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-04_18:17:09_22a69f1d7d520d5fbccbdb163d05db56bf79724c/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-09-04_18:17:09_22a69f1d7d520d5fbccbdb163d05db56bf79724c/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_18:17:09_22a69f1d7d520d5fbccbdb163d05db56bf79724c/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-04_18:17:09_22a69f1d7d520d5fbccbdb163d05db56bf79724c/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-04_18:17:09_22a69f1d7d520d5fbccbdb163d05db56bf79724c/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_18:17:09_22a69f1d7d520d5fbccbdb163d05db56bf79724c/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_18:17:09_22a69f1d7d520d5fbccbdb163d05db56bf79724c/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-04_18:17:09_22a69f1d7d520d5fbccbdb163d05db56bf79724c/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 9cf6b0832b3709f648f3d6c3c9be7312bd3c7cce..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_18:17:09_22a69f1d7d520d5fbccbdb163d05db56bf79724c/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.475328,0.00376,266.0,0.494,202.0 diff --git a/raw_results/2023-09-04_18:17:09_22a69f1d7d520d5fbccbdb163d05db56bf79724c/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-04_18:17:09_22a69f1d7d520d5fbccbdb163d05db56bf79724c/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index f01f928b0dc0f62093bb761cb5e27a6cefddf4fa..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_18:17:09_22a69f1d7d520d5fbccbdb163d05db56bf79724c/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-04 18:55:32,120][benchmark][INFO] - Configuring inference benchmark -[2023-09-04 18:55:32,121][benchmark][INFO] - + Setting seed(42) -[2023-09-04 18:55:33,572][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-04 18:55:33,573][backend][INFO] - Configuring pytorch backend -[2023-09-04 18:55:33,573][backend][INFO] - + Checking initial device isolation -[2023-09-04 18:55:33,573][backend][INFO] - + Checking contineous device isolation -[2023-09-04 18:55:33,573][pytorch][INFO] - + Disabling gradients -[2023-09-04 18:55:33,573][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-04 18:55:34,237][pytorch][INFO] - + Turning on eval mode -[2023-09-04 18:55:34,237][inference][INFO] - Running inference benchmark -[2023-09-04 18:55:34,440][inference][INFO] - + Tracking forward pass peak memory -[2023-09-04 18:55:34,487][inference][INFO] - + Forward pass peak memory: 469.475328 (MB) -[2023-09-04 18:55:34,489][inference][INFO] - + Warming up the forward pass 
-[2023-09-04 18:55:34,520][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-04 18:55:39,567][inference][INFO] - + Forward pass latency: 3.76e-03 (s) -[2023-09-04 18:55:39,568][inference][INFO] - + Forward pass throughput: 266.00 (samples/s) -[2023-09-04 18:55:39,569][inference][INFO] - + Warming up the generation pass -[2023-09-04 18:55:40,121][inference][INFO] - + Tracking generation latency and throughput -[2023-09-04 18:55:45,556][inference][INFO] - + Generation pass latency: 4.94e-01 (s) -[2023-09-04 18:55:45,557][inference][INFO] - + Generation pass throughput: 202.00 (tokens/s) -[2023-09-04 18:55:45,557][inference][INFO] - Saving inference results -[2023-09-04 18:55:45,569][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-04_18:34:04_49b69fe0d4885e258dbf657e35c445a94ffd09ae/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-04_18:34:04_49b69fe0d4885e258dbf657e35c445a94ffd09ae/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index ca7be0ccf7b1caddf162ac2d91e4da1837a53d11..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_18:34:04_49b69fe0d4885e258dbf657e35c445a94ffd09ae/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_18:34:04_49b69fe0d4885e258dbf657e35c445a94ffd09ae/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-04_18:34:04_49b69fe0d4885e258dbf657e35c445a94ffd09ae/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 39de393498d40ec02e6493cdce1ef276d1b2121b..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_18:34:04_49b69fe0d4885e258dbf657e35c445a94ffd09ae/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-04_18:34:04_49b69fe0d4885e258dbf657e35c445a94ffd09ae/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-09-04_18:34:04_49b69fe0d4885e258dbf657e35c445a94ffd09ae/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-04_18:34:04_49b69fe0d4885e258dbf657e35c445a94ffd09ae/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_18:34:04_49b69fe0d4885e258dbf657e35c445a94ffd09ae/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-04_18:34:04_49b69fe0d4885e258dbf657e35c445a94ffd09ae/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-04_18:34:04_49b69fe0d4885e258dbf657e35c445a94ffd09ae/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 71ecca5c3cd63d794a41dc9ecf8ec71a33015b9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_18:34:04_49b69fe0d4885e258dbf657e35c445a94ffd09ae/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_18:34:04_49b69fe0d4885e258dbf657e35c445a94ffd09ae/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-04_18:34:04_49b69fe0d4885e258dbf657e35c445a94ffd09ae/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 80f17555a28578594ceee4857973f7e235950528..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_18:34:04_49b69fe0d4885e258dbf657e35c445a94ffd09ae/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.65727999999996,0.0037,270.0 diff --git a/raw_results/2023-09-04_18:34:04_49b69fe0d4885e258dbf657e35c445a94ffd09ae/pytorch_bert_inference/0/main.log b/raw_results/2023-09-04_18:34:04_49b69fe0d4885e258dbf657e35c445a94ffd09ae/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
ae497c5ea386d7af7b5597cc9983b7c816dee66f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_18:34:04_49b69fe0d4885e258dbf657e35c445a94ffd09ae/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-04 18:56:54,968][benchmark][INFO] - Configuring inference benchmark -[2023-09-04 18:56:54,969][benchmark][INFO] - + Setting seed(42) -[2023-09-04 18:56:56,192][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-04 18:56:56,193][backend][INFO] - Configuring pytorch backend -[2023-09-04 18:56:56,193][backend][INFO] - + Checking initial device isolation -[2023-09-04 18:56:56,193][backend][INFO] - + Checking contineous device isolation -[2023-09-04 18:56:56,193][pytorch][INFO] - + Disabling gradients -[2023-09-04 18:56:56,193][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-04 18:56:56,821][pytorch][INFO] - + Turning on eval mode -[2023-09-04 18:56:56,821][inference][INFO] - Running inference benchmark -[2023-09-04 18:56:56,938][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 18:56:56,939][inference][INFO] - + Tracking forward pass peak memory -[2023-09-04 18:56:57,001][inference][INFO] - + Forward pass peak memory: 466.65727999999996 (MB) -[2023-09-04 18:56:57,002][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 18:56:57,004][inference][INFO] - + Warming up the forward pass -[2023-09-04 18:56:57,036][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-04 18:57:02,085][inference][INFO] - + Forward pass latency: 3.70e-03 (s) -[2023-09-04 18:57:02,087][inference][INFO] - + Forward pass throughput: 270.00 (samples/s) -[2023-09-04 18:57:02,087][inference][INFO] - Saving inference results -[2023-09-04 18:57:02,099][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-04_18:34:04_49b69fe0d4885e258dbf657e35c445a94ffd09ae/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-04_18:34:04_49b69fe0d4885e258dbf657e35c445a94ffd09ae/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 79188f9d724b79c791d283621db374c1226bed4d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_18:34:04_49b69fe0d4885e258dbf657e35c445a94ffd09ae/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_18:34:04_49b69fe0d4885e258dbf657e35c445a94ffd09ae/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-04_18:34:04_49b69fe0d4885e258dbf657e35c445a94ffd09ae/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 68e6c7c8162d125a7ba06be638295c605348a8d0..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_18:34:04_49b69fe0d4885e258dbf657e35c445a94ffd09ae/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
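
The hydra.yaml above records mode: MULTIRUN with the basic sweeper's params set to benchmark.input_shapes.batch_size: 1,4, which is what produces the numbered job subdirectories 0/ (batch_size=1) and 1/ (batch_size=4) throughout these raw_results. A small sketch of how such a comma-separated sweep expands into per-job override lists (a simplified model of Hydra's BasicSweeper, not its actual implementation):

# Simplified model of Hydra's BasicSweeper expansion; real Hydra also handles
# ranges, globs, and max_batch_size, none of which are used in this config.
from itertools import product

def expand_sweep(params: dict) -> list:
    keys = list(params)
    value_lists = [str(params[k]).split(",") for k in keys]
    return [[f"{k}={v}" for k, v in zip(keys, combo)] for combo in product(*value_lists)]

for job_num, overrides in enumerate(expand_sweep({"benchmark.input_shapes.batch_size": "1,4"})):
    print(job_num, overrides)
# 0 ['benchmark.input_shapes.batch_size=1']
# 1 ['benchmark.input_shapes.batch_size=4']
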
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-04_18:34:04_49b69fe0d4885e258dbf657e35c445a94ffd09ae/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-04_18:34:04_49b69fe0d4885e258dbf657e35c445a94ffd09ae/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-09-04_18:34:04_49b69fe0d4885e258dbf657e35c445a94ffd09ae/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_18:34:04_49b69fe0d4885e258dbf657e35c445a94ffd09ae/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-04_18:34:04_49b69fe0d4885e258dbf657e35c445a94ffd09ae/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-04_18:34:04_49b69fe0d4885e258dbf657e35c445a94ffd09ae/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 818191e2b78b5ccf3bd0b493867b92a76b82ece7..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_18:34:04_49b69fe0d4885e258dbf657e35c445a94ffd09ae/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_18:34:04_49b69fe0d4885e258dbf657e35c445a94ffd09ae/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-04_18:34:04_49b69fe0d4885e258dbf657e35c445a94ffd09ae/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index a3e22b57112db880fad471119969e86e2a5622e4..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_18:34:04_49b69fe0d4885e258dbf657e35c445a94ffd09ae/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.63212799999997,0.00435,920.0 diff --git a/raw_results/2023-09-04_18:34:04_49b69fe0d4885e258dbf657e35c445a94ffd09ae/pytorch_bert_inference/1/main.log b/raw_results/2023-09-04_18:34:04_49b69fe0d4885e258dbf657e35c445a94ffd09ae/pytorch_bert_inference/1/main.log deleted file mode 100644 index 897ba365bc29a65fef977a693884ae6fc318df03..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_18:34:04_49b69fe0d4885e258dbf657e35c445a94ffd09ae/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-04 18:57:02,487][benchmark][INFO] - Configuring inference benchmark -[2023-09-04 18:57:02,488][benchmark][INFO] - + Setting seed(42) -[2023-09-04 18:57:02,917][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-04 18:57:02,918][backend][INFO] - Configuring pytorch backend -[2023-09-04 18:57:02,918][backend][INFO] - + Checking initial device isolation -[2023-09-04 18:57:02,918][backend][INFO] - + Checking contineous device isolation -[2023-09-04 18:57:02,918][pytorch][INFO] - + Disabling gradients -[2023-09-04 18:57:02,918][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-04 18:57:03,037][pytorch][INFO] - + Turning on eval mode -[2023-09-04 18:57:03,037][inference][INFO] - Running inference benchmark -[2023-09-04 18:57:03,161][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 18:57:03,162][inference][INFO] - + Tracking forward 
pass peak memory -[2023-09-04 18:57:03,207][inference][INFO] - + Forward pass peak memory: 467.63212799999997 (MB) -[2023-09-04 18:57:03,209][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 18:57:03,210][inference][INFO] - + Warming up the forward pass -[2023-09-04 18:57:03,254][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-04 18:57:08,296][inference][INFO] - + Forward pass latency: 4.35e-03 (s) -[2023-09-04 18:57:08,297][inference][INFO] - + Forward pass throughput: 920.00 (samples/s) -[2023-09-04 18:57:08,298][inference][INFO] - Saving inference results -[2023-09-04 18:57:08,307][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-04_18:34:04_49b69fe0d4885e258dbf657e35c445a94ffd09ae/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-04_18:34:04_49b69fe0d4885e258dbf657e35c445a94ffd09ae/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index b132b60d1cb4c768522fdeb1133da52e4fe904d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_18:34:04_49b69fe0d4885e258dbf657e35c445a94ffd09ae/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_18:34:04_49b69fe0d4885e258dbf657e35c445a94ffd09ae/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-04_18:34:04_49b69fe0d4885e258dbf657e35c445a94ffd09ae/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index f3b61568a3889784dafa96b96d6f3966a284ff96..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_18:34:04_49b69fe0d4885e258dbf657e35c445a94ffd09ae/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - 
launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-04_18:34:04_49b69fe0d4885e258dbf657e35c445a94ffd09ae/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-04_18:34:04_49b69fe0d4885e258dbf657e35c445a94ffd09ae/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-09-04_18:34:04_49b69fe0d4885e258dbf657e35c445a94ffd09ae/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_18:34:04_49b69fe0d4885e258dbf657e35c445a94ffd09ae/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-04_18:34:04_49b69fe0d4885e258dbf657e35c445a94ffd09ae/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-04_18:34:04_49b69fe0d4885e258dbf657e35c445a94ffd09ae/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 0b44643285bfa4b479f121f9802376aa8dd5603e..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_18:34:04_49b69fe0d4885e258dbf657e35c445a94ffd09ae/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.33.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_18:34:04_49b69fe0d4885e258dbf657e35c445a94ffd09ae/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-04_18:34:04_49b69fe0d4885e258dbf657e35c445a94ffd09ae/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 43cd0585199813c104ed82404feb049ba844f2f0..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_18:34:04_49b69fe0d4885e258dbf657e35c445a94ffd09ae/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.467136,0.00386,259.0,0.503,199.0 diff --git a/raw_results/2023-09-04_18:34:04_49b69fe0d4885e258dbf657e35c445a94ffd09ae/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-04_18:34:04_49b69fe0d4885e258dbf657e35c445a94ffd09ae/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index e1d45ead9a9d0cd95f2c98b07b37a7714643af26..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-09-04_18:34:04_49b69fe0d4885e258dbf657e35c445a94ffd09ae/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-04 18:57:13,182][benchmark][INFO] - Configuring inference benchmark -[2023-09-04 18:57:13,183][benchmark][INFO] - + Setting seed(42) -[2023-09-04 18:57:14,716][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-04 18:57:14,717][backend][INFO] - Configuring pytorch backend -[2023-09-04 18:57:14,717][backend][INFO] - + Checking initial device isolation -[2023-09-04 18:57:14,717][backend][INFO] - + Checking contineous device isolation -[2023-09-04 18:57:14,717][pytorch][INFO] - + Disabling gradients -[2023-09-04 18:57:14,717][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-04 18:57:15,445][pytorch][INFO] - + Turning on eval mode -[2023-09-04 18:57:15,446][inference][INFO] - Running inference benchmark -[2023-09-04 18:57:15,643][inference][INFO] - + Tracking forward pass peak memory -[2023-09-04 18:57:15,692][inference][INFO] - + Forward pass peak memory: 469.467136 (MB) -[2023-09-04 18:57:15,693][inference][INFO] - + Warming up the forward pass -[2023-09-04 18:57:15,731][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-04 18:57:20,777][inference][INFO] - + Forward pass latency: 3.86e-03 (s) -[2023-09-04 18:57:20,779][inference][INFO] - + Forward pass throughput: 259.00 (samples/s) -[2023-09-04 18:57:20,780][inference][INFO] - + Warming up the generation pass -[2023-09-04 18:57:21,289][inference][INFO] - + Tracking generation latency and throughput -[2023-09-04 18:57:26,324][inference][INFO] - + Generation pass latency: 5.03e-01 (s) -[2023-09-04 18:57:26,325][inference][INFO] - + Generation pass throughput: 199.00 (tokens/s) -[2023-09-04 18:57:26,325][inference][INFO] - Saving inference results -[2023-09-04 18:57:26,338][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-04_19:12:11_d8e13b3e04da9e61c6f16df43815656f59688abd/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-04_19:12:11_d8e13b3e04da9e61c6f16df43815656f59688abd/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index d75847bed7772db4647c7b58e851f1868b48e619..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_19:12:11_d8e13b3e04da9e61c6f16df43815656f59688abd/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_19:12:11_d8e13b3e04da9e61c6f16df43815656f59688abd/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-04_19:12:11_d8e13b3e04da9e61c6f16df43815656f59688abd/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 9935a78e89ecd4ff6019062f62098fac7ab993b1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_19:12:11_d8e13b3e04da9e61c6f16df43815656f59688abd/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
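
In the config.yaml above, disable_grad and eval_mode are left as ${is_inference:${benchmark.name}} interpolations, while the rendered hydra_config.yaml for the same runs shows them resolved to true. That behavior is consistent with a custom OmegaConf resolver along these lines (the resolver name comes from the config; its body here is an assumption):

# Assumed resolver: returns True exactly when the benchmark is "inference",
# matching disable_grad/eval_mode resolving to true in hydra_config.yaml.
from omegaconf import OmegaConf

OmegaConf.register_new_resolver("is_inference", lambda name: name == "inference")

cfg = OmegaConf.create({
    "benchmark": {"name": "inference"},
    "backend": {"disable_grad": "${is_inference:${benchmark.name}}"},
})
print(cfg.backend.disable_grad)  # True
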
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-04_19:12:11_d8e13b3e04da9e61c6f16df43815656f59688abd/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-04_19:12:11_d8e13b3e04da9e61c6f16df43815656f59688abd/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-04_19:12:11_d8e13b3e04da9e61c6f16df43815656f59688abd/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_19:12:11_d8e13b3e04da9e61c6f16df43815656f59688abd/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-04_19:12:11_d8e13b3e04da9e61c6f16df43815656f59688abd/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-04_19:12:11_d8e13b3e04da9e61c6f16df43815656f59688abd/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 36a53864c4e26f9e38905bad2498ae02dc1b767f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_19:12:11_d8e13b3e04da9e61c6f16df43815656f59688abd/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_19:12:11_d8e13b3e04da9e61c6f16df43815656f59688abd/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-04_19:12:11_d8e13b3e04da9e61c6f16df43815656f59688abd/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index ac2d715e3287435640957deb05af9d60536aeecc..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_19:12:11_d8e13b3e04da9e61c6f16df43815656f59688abd/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.808832,0.00364,275.0 diff --git a/raw_results/2023-09-04_19:12:11_d8e13b3e04da9e61c6f16df43815656f59688abd/pytorch_bert_inference/0/main.log b/raw_results/2023-09-04_19:12:11_d8e13b3e04da9e61c6f16df43815656f59688abd/pytorch_bert_inference/0/main.log deleted file mode 100644 index 27c5930437c32ef633c6d6e7c259a9d75c4e8457..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_19:12:11_d8e13b3e04da9e61c6f16df43815656f59688abd/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-04 20:49:58,030][benchmark][INFO] - Configuring inference benchmark -[2023-09-04 20:49:58,032][benchmark][INFO] - + Setting seed(42) -[2023-09-04 20:49:59,278][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-04 20:49:59,279][backend][INFO] - Configuring pytorch backend -[2023-09-04 20:49:59,279][backend][INFO] - + Checking initial device isolation -[2023-09-04 20:49:59,279][backend][INFO] - + Checking contineous device isolation -[2023-09-04 20:49:59,279][pytorch][INFO] - + Disabling gradients -[2023-09-04 20:49:59,279][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-04 20:50:00,189][pytorch][INFO] - + Turning on eval mode -[2023-09-04 20:50:00,190][inference][INFO] - Running inference benchmark -[2023-09-04 20:50:00,318][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 20:50:00,319][inference][INFO] - + Tracking forward pass peak 
memory -[2023-09-04 20:50:00,386][inference][INFO] - + Forward pass peak memory: 466.808832 (MB) -[2023-09-04 20:50:00,387][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 20:50:00,389][inference][INFO] - + Warming up the forward pass -[2023-09-04 20:50:00,422][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-04 20:50:05,471][inference][INFO] - + Forward pass latency: 3.64e-03 (s) -[2023-09-04 20:50:05,472][inference][INFO] - + Forward pass throughput: 275.00 (samples/s) -[2023-09-04 20:50:05,472][inference][INFO] - Saving inference results -[2023-09-04 20:50:05,483][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-04_19:12:11_d8e13b3e04da9e61c6f16df43815656f59688abd/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-04_19:12:11_d8e13b3e04da9e61c6f16df43815656f59688abd/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 344ec2f935a31a19a62e706f12f4bb8e09c4c386..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_19:12:11_d8e13b3e04da9e61c6f16df43815656f59688abd/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_19:12:11_d8e13b3e04da9e61c6f16df43815656f59688abd/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-04_19:12:11_d8e13b3e04da9e61c6f16df43815656f59688abd/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 43773a2d0b5e9a77319a6d418f7b100c8e609048..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_19:12:11_d8e13b3e04da9e61c6f16df43815656f59688abd/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-04_19:12:11_d8e13b3e04da9e61c6f16df43815656f59688abd/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-04_19:12:11_d8e13b3e04da9e61c6f16df43815656f59688abd/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-09-04_19:12:11_d8e13b3e04da9e61c6f16df43815656f59688abd/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_19:12:11_d8e13b3e04da9e61c6f16df43815656f59688abd/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-04_19:12:11_d8e13b3e04da9e61c6f16df43815656f59688abd/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-04_19:12:11_d8e13b3e04da9e61c6f16df43815656f59688abd/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 9af0e66383a2862a3d036ea4917e3bacabb881ae..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_19:12:11_d8e13b3e04da9e61c6f16df43815656f59688abd/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_19:12:11_d8e13b3e04da9e61c6f16df43815656f59688abd/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-04_19:12:11_d8e13b3e04da9e61c6f16df43815656f59688abd/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index f5a0da795e7c87e89c008e957f746b889d68ec34..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_19:12:11_d8e13b3e04da9e61c6f16df43815656f59688abd/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.79187199999996,0.00418,957.0 diff --git a/raw_results/2023-09-04_19:12:11_d8e13b3e04da9e61c6f16df43815656f59688abd/pytorch_bert_inference/1/main.log b/raw_results/2023-09-04_19:12:11_d8e13b3e04da9e61c6f16df43815656f59688abd/pytorch_bert_inference/1/main.log deleted file mode 100644 index 23a2767bbf145fe1709ed5ad72235e380847aee7..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-09-04_19:12:11_d8e13b3e04da9e61c6f16df43815656f59688abd/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-04 20:50:05,868][benchmark][INFO] - Configuring inference benchmark -[2023-09-04 20:50:05,870][benchmark][INFO] - + Setting seed(42) -[2023-09-04 20:50:06,313][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-04 20:50:06,313][backend][INFO] - Configuring pytorch backend -[2023-09-04 20:50:06,314][backend][INFO] - + Checking initial device isolation -[2023-09-04 20:50:06,314][backend][INFO] - + Checking contineous device isolation -[2023-09-04 20:50:06,314][pytorch][INFO] - + Disabling gradients -[2023-09-04 20:50:06,314][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-04 20:50:06,438][pytorch][INFO] - + Turning on eval mode -[2023-09-04 20:50:06,438][inference][INFO] - Running inference benchmark -[2023-09-04 20:50:06,569][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 20:50:06,570][inference][INFO] - + Tracking forward pass peak memory -[2023-09-04 20:50:06,612][inference][INFO] - + Forward pass peak memory: 467.79187199999996 (MB) -[2023-09-04 20:50:06,613][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-04 20:50:06,615][inference][INFO] - + Warming up the forward pass -[2023-09-04 20:50:06,658][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-04 20:50:11,697][inference][INFO] - + Forward pass latency: 4.18e-03 (s) -[2023-09-04 20:50:11,698][inference][INFO] - + Forward pass throughput: 957.00 (samples/s) -[2023-09-04 20:50:11,698][inference][INFO] - Saving inference results -[2023-09-04 20:50:11,707][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-04_19:12:11_d8e13b3e04da9e61c6f16df43815656f59688abd/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-04_19:12:11_d8e13b3e04da9e61c6f16df43815656f59688abd/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 7e32a35f367f532a16d65c94e459c4f30c9188c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_19:12:11_d8e13b3e04da9e61c6f16df43815656f59688abd/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference 
-model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_19:12:11_d8e13b3e04da9e61c6f16df43815656f59688abd/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-04_19:12:11_d8e13b3e04da9e61c6f16df43815656f59688abd/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index f2adae8235e07d32e1a75b6e0b4efab3f60c80c2..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_19:12:11_d8e13b3e04da9e61c6f16df43815656f59688abd/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-04_19:12:11_d8e13b3e04da9e61c6f16df43815656f59688abd/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-04_19:12:11_d8e13b3e04da9e61c6f16df43815656f59688abd/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-09-04_19:12:11_d8e13b3e04da9e61c6f16df43815656f59688abd/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_19:12:11_d8e13b3e04da9e61c6f16df43815656f59688abd/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-04_19:12:11_d8e13b3e04da9e61c6f16df43815656f59688abd/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-04_19:12:11_d8e13b3e04da9e61c6f16df43815656f59688abd/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 5cf5f1ff54ffd6523449b409066f7697ce8772db..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_19:12:11_d8e13b3e04da9e61c6f16df43815656f59688abd/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-04_19:12:11_d8e13b3e04da9e61c6f16df43815656f59688abd/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-04_19:12:11_d8e13b3e04da9e61c6f16df43815656f59688abd/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index c8f577510a48536bf173ea17fa3002c084838d63..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_19:12:11_d8e13b3e04da9e61c6f16df43815656f59688abd/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.876736,0.00313,319.0,0.482,207.0 diff --git a/raw_results/2023-09-04_19:12:11_d8e13b3e04da9e61c6f16df43815656f59688abd/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-04_19:12:11_d8e13b3e04da9e61c6f16df43815656f59688abd/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 882e4523e70950a353b030153ccf8151a6ad6a3b..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-04_19:12:11_d8e13b3e04da9e61c6f16df43815656f59688abd/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-04 20:50:16,515][benchmark][INFO] - Configuring inference benchmark -[2023-09-04 20:50:16,516][benchmark][INFO] - + Setting seed(42) -[2023-09-04 20:50:18,043][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-04 20:50:18,044][backend][INFO] - Configuring pytorch backend -[2023-09-04 20:50:18,044][backend][INFO] - + Checking initial device isolation -[2023-09-04 20:50:18,044][backend][INFO] - + Checking contineous device isolation -[2023-09-04 20:50:18,044][pytorch][INFO] - + Disabling gradients -[2023-09-04 20:50:18,044][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-04 20:50:18,712][pytorch][INFO] - + Turning on eval mode -[2023-09-04 20:50:18,713][inference][INFO] - Running inference benchmark -[2023-09-04 20:50:18,920][inference][INFO] - + Tracking forward pass peak memory -[2023-09-04 20:50:18,972][inference][INFO] - + Forward pass peak memory: 469.876736 (MB) -[2023-09-04 20:50:18,973][inference][INFO] - + Warming up the forward pass 
-[2023-09-04 20:50:19,007][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-04 20:50:24,060][inference][INFO] - + Forward pass latency: 3.13e-03 (s) -[2023-09-04 20:50:24,062][inference][INFO] - + Forward pass throughput: 319.00 (samples/s) -[2023-09-04 20:50:24,063][inference][INFO] - + Warming up the generation pass -[2023-09-04 20:50:24,551][inference][INFO] - + Tracking generation latency and throughput -[2023-09-04 20:50:29,852][inference][INFO] - + Generation pass latency: 4.82e-01 (s) -[2023-09-04 20:50:29,853][inference][INFO] - + Generation pass throughput: 207.00 (tokens/s) -[2023-09-04 20:50:29,853][inference][INFO] - Saving inference results -[2023-09-04 20:50:29,866][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-05_08:12:25_404ff8fc17599788a546818373be113b1fc8456a/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-05_08:12:25_404ff8fc17599788a546818373be113b1fc8456a/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index d75847bed7772db4647c7b58e851f1868b48e619..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_08:12:25_404ff8fc17599788a546818373be113b1fc8456a/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_08:12:25_404ff8fc17599788a546818373be113b1fc8456a/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-05_08:12:25_404ff8fc17599788a546818373be113b1fc8456a/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index a2694b18c349d9b0daa3a52416c1343747af5628..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_08:12:25_404ff8fc17599788a546818373be113b1fc8456a/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-05_08:12:25_404ff8fc17599788a546818373be113b1fc8456a/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-09-05_08:12:25_404ff8fc17599788a546818373be113b1fc8456a/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-05_08:12:25_404ff8fc17599788a546818373be113b1fc8456a/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_08:12:25_404ff8fc17599788a546818373be113b1fc8456a/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-05_08:12:25_404ff8fc17599788a546818373be113b1fc8456a/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-05_08:12:25_404ff8fc17599788a546818373be113b1fc8456a/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 36a53864c4e26f9e38905bad2498ae02dc1b767f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_08:12:25_404ff8fc17599788a546818373be113b1fc8456a/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_08:12:25_404ff8fc17599788a546818373be113b1fc8456a/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-05_08:12:25_404ff8fc17599788a546818373be113b1fc8456a/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 652546e8bb4f3fe1a15e7319f677f5eb78194e2e..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_08:12:25_404ff8fc17599788a546818373be113b1fc8456a/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.01715199999995,0.00361,277.0 diff --git a/raw_results/2023-09-05_08:12:25_404ff8fc17599788a546818373be113b1fc8456a/pytorch_bert_inference/0/main.log b/raw_results/2023-09-05_08:12:25_404ff8fc17599788a546818373be113b1fc8456a/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
5d1dc5c62eb7fab7a98304138ee0bf04fb98a14a..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_08:12:25_404ff8fc17599788a546818373be113b1fc8456a/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-05 08:49:53,725][benchmark][INFO] - Configuring inference benchmark -[2023-09-05 08:49:53,725][benchmark][INFO] - + Setting seed(42) -[2023-09-05 08:49:55,335][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-05 08:49:55,335][backend][INFO] - Configuring pytorch backend -[2023-09-05 08:49:55,336][backend][INFO] - + Checking initial device isolation -[2023-09-05 08:49:55,336][backend][INFO] - + Checking contineous device isolation -[2023-09-05 08:49:55,336][pytorch][INFO] - + Disabling gradients -[2023-09-05 08:49:55,336][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-05 08:49:55,961][pytorch][INFO] - + Turning on eval mode -[2023-09-05 08:49:55,961][inference][INFO] - Running inference benchmark -[2023-09-05 08:49:56,084][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 08:49:56,086][inference][INFO] - + Tracking forward pass peak memory -[2023-09-05 08:49:56,146][inference][INFO] - + Forward pass peak memory: 468.01715199999995 (MB) -[2023-09-05 08:49:56,147][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 08:49:56,149][inference][INFO] - + Warming up the forward pass -[2023-09-05 08:49:56,186][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-05 08:50:01,237][inference][INFO] - + Forward pass latency: 3.61e-03 (s) -[2023-09-05 08:50:01,239][inference][INFO] - + Forward pass throughput: 277.00 (samples/s) -[2023-09-05 08:50:01,239][inference][INFO] - Saving inference results -[2023-09-05 08:50:01,275][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-05_08:12:25_404ff8fc17599788a546818373be113b1fc8456a/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-05_08:12:25_404ff8fc17599788a546818373be113b1fc8456a/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 344ec2f935a31a19a62e706f12f4bb8e09c4c386..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_08:12:25_404ff8fc17599788a546818373be113b1fc8456a/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_08:12:25_404ff8fc17599788a546818373be113b1fc8456a/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-05_08:12:25_404ff8fc17599788a546818373be113b1fc8456a/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 3de555b59c2e3a0bb0b14ffdd1f33222fb0f19b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_08:12:25_404ff8fc17599788a546818373be113b1fc8456a/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-05_08:12:25_404ff8fc17599788a546818373be113b1fc8456a/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-05_08:12:25_404ff8fc17599788a546818373be113b1fc8456a/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-09-05_08:12:25_404ff8fc17599788a546818373be113b1fc8456a/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_08:12:25_404ff8fc17599788a546818373be113b1fc8456a/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-05_08:12:25_404ff8fc17599788a546818373be113b1fc8456a/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-05_08:12:25_404ff8fc17599788a546818373be113b1fc8456a/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 9af0e66383a2862a3d036ea4917e3bacabb881ae..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_08:12:25_404ff8fc17599788a546818373be113b1fc8456a/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_08:12:25_404ff8fc17599788a546818373be113b1fc8456a/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-05_08:12:25_404ff8fc17599788a546818373be113b1fc8456a/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 3f52a47be4a745dc8284df460a18d2bd0a939ed7..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_08:12:25_404ff8fc17599788a546818373be113b1fc8456a/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.975616,0.00344,1160.0 diff --git a/raw_results/2023-09-05_08:12:25_404ff8fc17599788a546818373be113b1fc8456a/pytorch_bert_inference/1/main.log b/raw_results/2023-09-05_08:12:25_404ff8fc17599788a546818373be113b1fc8456a/pytorch_bert_inference/1/main.log deleted file mode 100644 index 64710dbd2d0120de2e4de34afd047ec5d07ca9c2..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_08:12:25_404ff8fc17599788a546818373be113b1fc8456a/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-05 08:50:01,705][benchmark][INFO] - Configuring inference benchmark -[2023-09-05 08:50:01,706][benchmark][INFO] - + Setting seed(42) -[2023-09-05 08:50:02,156][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-05 08:50:02,156][backend][INFO] - Configuring pytorch backend -[2023-09-05 08:50:02,157][backend][INFO] - + Checking initial device isolation -[2023-09-05 08:50:02,157][backend][INFO] - + Checking contineous device isolation -[2023-09-05 08:50:02,157][pytorch][INFO] - + Disabling gradients -[2023-09-05 08:50:02,157][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-05 08:50:02,275][pytorch][INFO] - + Turning on eval mode -[2023-09-05 08:50:02,276][inference][INFO] - Running inference benchmark -[2023-09-05 08:50:02,397][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 08:50:02,398][inference][INFO] - + Tracking forward pass peak memory
-[2023-09-05 08:50:02,439][inference][INFO] - + Forward pass peak memory: 468.975616 (MB) -[2023-09-05 08:50:02,440][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 08:50:02,441][inference][INFO] - + Warming up the forward pass -[2023-09-05 08:50:02,476][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-05 08:50:07,523][inference][INFO] - + Forward pass latency: 3.44e-03 (s) -[2023-09-05 08:50:07,524][inference][INFO] - + Forward pass throughput: 1160.00 (samples/s) -[2023-09-05 08:50:07,525][inference][INFO] - Saving inference results -[2023-09-05 08:50:07,536][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-05_08:12:25_404ff8fc17599788a546818373be113b1fc8456a/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-05_08:12:25_404ff8fc17599788a546818373be113b1fc8456a/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 7e32a35f367f532a16d65c94e459c4f30c9188c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_08:12:25_404ff8fc17599788a546818373be113b1fc8456a/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_08:12:25_404ff8fc17599788a546818373be113b1fc8456a/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-05_08:12:25_404ff8fc17599788a546818373be113b1fc8456a/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 1892822f78da592022de610b69edc21b418dc44f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_08:12:25_404ff8fc17599788a546818373be113b1fc8456a/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: -
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-05_08:12:25_404ff8fc17599788a546818373be113b1fc8456a/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-05_08:12:25_404ff8fc17599788a546818373be113b1fc8456a/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-09-05_08:12:25_404ff8fc17599788a546818373be113b1fc8456a/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_08:12:25_404ff8fc17599788a546818373be113b1fc8456a/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-05_08:12:25_404ff8fc17599788a546818373be113b1fc8456a/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-05_08:12:25_404ff8fc17599788a546818373be113b1fc8456a/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 5cf5f1ff54ffd6523449b409066f7697ce8772db..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_08:12:25_404ff8fc17599788a546818373be113b1fc8456a/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_08:12:25_404ff8fc17599788a546818373be113b1fc8456a/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-05_08:12:25_404ff8fc17599788a546818373be113b1fc8456a/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 7005b000f264f7a81fdfd870479f22d8e7b06849..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_08:12:25_404ff8fc17599788a546818373be113b1fc8456a/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.307392,0.00389,257.0,0.586,171.0 diff --git a/raw_results/2023-09-05_08:12:25_404ff8fc17599788a546818373be113b1fc8456a/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-05_08:12:25_404ff8fc17599788a546818373be113b1fc8456a/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 030b0639279f004ead7e7d04a23141a52d3ffdd3..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-09-05_08:12:25_404ff8fc17599788a546818373be113b1fc8456a/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-05 08:50:12,745][benchmark][INFO] - Configuring inference benchmark -[2023-09-05 08:50:12,748][benchmark][INFO] - + Setting seed(42) -[2023-09-05 08:50:14,293][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-05 08:50:14,294][backend][INFO] - Configuring pytorch backend -[2023-09-05 08:50:14,294][backend][INFO] - + Checking initial device isolation -[2023-09-05 08:50:14,294][backend][INFO] - + Checking contineous device isolation -[2023-09-05 08:50:14,295][pytorch][INFO] - + Disabling gradients -[2023-09-05 08:50:14,295][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-05 08:50:14,985][pytorch][INFO] - + Turning on eval mode -[2023-09-05 08:50:14,986][inference][INFO] - Running inference benchmark -[2023-09-05 08:50:15,191][inference][INFO] - + Tracking forward pass peak memory -[2023-09-05 08:50:15,237][inference][INFO] - + Forward pass peak memory: 469.307392 (MB) -[2023-09-05 08:50:15,239][inference][INFO] - + Warming up the forward pass -[2023-09-05 08:50:15,271][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-05 08:50:20,315][inference][INFO] - + Forward pass latency: 3.89e-03 (s) -[2023-09-05 08:50:20,317][inference][INFO] - + Forward pass throughput: 257.00 (samples/s) -[2023-09-05 08:50:20,318][inference][INFO] - + Warming up the generation pass -[2023-09-05 08:50:20,918][inference][INFO] - + Tracking generation latency and throughput -[2023-09-05 08:50:26,197][inference][INFO] - + Generation pass latency: 5.86e-01 (s) -[2023-09-05 08:50:26,198][inference][INFO] - + Generation pass throughput: 171.00 (tokens/s) -[2023-09-05 08:50:26,198][inference][INFO] - Saving inference results -[2023-09-05 08:50:26,238][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-05_09:19:56_feec56959afe480e57b2acc177111ae18a5ea757/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-05_09:19:56_feec56959afe480e57b2acc177111ae18a5ea757/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index d75847bed7772db4647c7b58e851f1868b48e619..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_09:19:56_feec56959afe480e57b2acc177111ae18a5ea757/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_09:19:56_feec56959afe480e57b2acc177111ae18a5ea757/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-05_09:19:56_feec56959afe480e57b2acc177111ae18a5ea757/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index ffcdf4f501e92652878958e128f7b5adec12a24b..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_09:19:56_feec56959afe480e57b2acc177111ae18a5ea757/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-05_09:19:56_feec56959afe480e57b2acc177111ae18a5ea757/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-05_09:19:56_feec56959afe480e57b2acc177111ae18a5ea757/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-05_09:19:56_feec56959afe480e57b2acc177111ae18a5ea757/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_09:19:56_feec56959afe480e57b2acc177111ae18a5ea757/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-05_09:19:56_feec56959afe480e57b2acc177111ae18a5ea757/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-05_09:19:56_feec56959afe480e57b2acc177111ae18a5ea757/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 36a53864c4e26f9e38905bad2498ae02dc1b767f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_09:19:56_feec56959afe480e57b2acc177111ae18a5ea757/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_09:19:56_feec56959afe480e57b2acc177111ae18a5ea757/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-05_09:19:56_feec56959afe480e57b2acc177111ae18a5ea757/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 968c6b45fd14257d4b5099c9d7ea4055aa4af447..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_09:19:56_feec56959afe480e57b2acc177111ae18a5ea757/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.48115199999995,0.00327,306.0 diff --git a/raw_results/2023-09-05_09:19:56_feec56959afe480e57b2acc177111ae18a5ea757/pytorch_bert_inference/0/main.log b/raw_results/2023-09-05_09:19:56_feec56959afe480e57b2acc177111ae18a5ea757/pytorch_bert_inference/0/main.log deleted file mode 100644 index 51bbbf1abb630de6459e8a54a64722fe374f15de..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_09:19:56_feec56959afe480e57b2acc177111ae18a5ea757/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-05 10:49:51,885][benchmark][INFO] - Configuring inference benchmark -[2023-09-05 10:49:51,886][benchmark][INFO] - + Setting seed(42) -[2023-09-05 10:49:53,286][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-05 10:49:53,286][backend][INFO] - Configuring pytorch backend -[2023-09-05 10:49:53,286][backend][INFO] - + Checking initial device isolation -[2023-09-05 10:49:53,287][backend][INFO] - + Checking contineous device isolation -[2023-09-05 10:49:53,287][pytorch][INFO] - + Disabling gradients -[2023-09-05 10:49:53,287][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-05 10:49:53,905][pytorch][INFO] - + Turning on eval mode -[2023-09-05 10:49:53,905][inference][INFO] - Running inference benchmark -[2023-09-05 10:49:54,027][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 10:49:54,028][inference][INFO] - + Tracking forward pass peak memory
-[2023-09-05 10:49:54,195][inference][INFO] - + Forward pass peak memory: 466.48115199999995 (MB) -[2023-09-05 10:49:54,196][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 10:49:54,198][inference][INFO] - + Warming up the forward pass -[2023-09-05 10:49:54,233][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-05 10:49:59,291][inference][INFO] - + Forward pass latency: 3.27e-03 (s) -[2023-09-05 10:49:59,292][inference][INFO] - + Forward pass throughput: 306.00 (samples/s) -[2023-09-05 10:49:59,292][inference][INFO] - Saving inference results -[2023-09-05 10:49:59,302][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-05_09:19:56_feec56959afe480e57b2acc177111ae18a5ea757/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-05_09:19:56_feec56959afe480e57b2acc177111ae18a5ea757/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 344ec2f935a31a19a62e706f12f4bb8e09c4c386..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_09:19:56_feec56959afe480e57b2acc177111ae18a5ea757/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_09:19:56_feec56959afe480e57b2acc177111ae18a5ea757/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-05_09:19:56_feec56959afe480e57b2acc177111ae18a5ea757/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 1a7f797b5e57e89c8a5c93ca352dd05cb8bb9241..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_09:19:56_feec56959afe480e57b2acc177111ae18a5ea757/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num}
- launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-05_09:19:56_feec56959afe480e57b2acc177111ae18a5ea757/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-09-05_09:19:56_feec56959afe480e57b2acc177111ae18a5ea757/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-09-05_09:19:56_feec56959afe480e57b2acc177111ae18a5ea757/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_09:19:56_feec56959afe480e57b2acc177111ae18a5ea757/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-05_09:19:56_feec56959afe480e57b2acc177111ae18a5ea757/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-05_09:19:56_feec56959afe480e57b2acc177111ae18a5ea757/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 9af0e66383a2862a3d036ea4917e3bacabb881ae..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_09:19:56_feec56959afe480e57b2acc177111ae18a5ea757/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_09:19:56_feec56959afe480e57b2acc177111ae18a5ea757/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-05_09:19:56_feec56959afe480e57b2acc177111ae18a5ea757/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 67c63fcc962645478982c7ae7172e05554655558..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_09:19:56_feec56959afe480e57b2acc177111ae18a5ea757/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.50515199999995,0.00464,862.0 diff --git a/raw_results/2023-09-05_09:19:56_feec56959afe480e57b2acc177111ae18a5ea757/pytorch_bert_inference/1/main.log b/raw_results/2023-09-05_09:19:56_feec56959afe480e57b2acc177111ae18a5ea757/pytorch_bert_inference/1/main.log deleted file mode 100644 index 
f08d10f4d6198dd0029c07186b56ab20f11ac467..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_09:19:56_feec56959afe480e57b2acc177111ae18a5ea757/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-05 10:49:59,672][benchmark][INFO] - Configuring inference benchmark -[2023-09-05 10:49:59,673][benchmark][INFO] - + Setting seed(42) -[2023-09-05 10:50:00,110][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-05 10:50:00,110][backend][INFO] - Configuring pytorch backend -[2023-09-05 10:50:00,111][backend][INFO] - + Checking initial device isolation -[2023-09-05 10:50:00,111][backend][INFO] - + Checking contineous device isolation -[2023-09-05 10:50:00,111][pytorch][INFO] - + Disabling gradients -[2023-09-05 10:50:00,111][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-05 10:50:00,228][pytorch][INFO] - + Turning on eval mode -[2023-09-05 10:50:00,229][inference][INFO] - Running inference benchmark -[2023-09-05 10:50:00,347][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 10:50:00,348][inference][INFO] - + Tracking forward pass peak memory -[2023-09-05 10:50:00,389][inference][INFO] - + Forward pass peak memory: 467.50515199999995 (MB) -[2023-09-05 10:50:00,390][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 10:50:00,391][inference][INFO] - + Warming up the forward pass -[2023-09-05 10:50:00,439][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-05 10:50:05,482][inference][INFO] - + Forward pass latency: 4.64e-03 (s) -[2023-09-05 10:50:05,483][inference][INFO] - + Forward pass throughput: 862.00 (samples/s) -[2023-09-05 10:50:05,483][inference][INFO] - Saving inference results -[2023-09-05 10:50:05,490][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-05_09:19:56_feec56959afe480e57b2acc177111ae18a5ea757/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-05_09:19:56_feec56959afe480e57b2acc177111ae18a5ea757/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 7e32a35f367f532a16d65c94e459c4f30c9188c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_09:19:56_feec56959afe480e57b2acc177111ae18a5ea757/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_09:19:56_feec56959afe480e57b2acc177111ae18a5ea757/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-05_09:19:56_feec56959afe480e57b2acc177111ae18a5ea757/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index f10ac045a20da32655c985b2f5a53050f89a3a0d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_09:19:56_feec56959afe480e57b2acc177111ae18a5ea757/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-05_09:19:56_feec56959afe480e57b2acc177111ae18a5ea757/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-05_09:19:56_feec56959afe480e57b2acc177111ae18a5ea757/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-09-05_09:19:56_feec56959afe480e57b2acc177111ae18a5ea757/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_09:19:56_feec56959afe480e57b2acc177111ae18a5ea757/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-05_09:19:56_feec56959afe480e57b2acc177111ae18a5ea757/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-05_09:19:56_feec56959afe480e57b2acc177111ae18a5ea757/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 5cf5f1ff54ffd6523449b409066f7697ce8772db..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_09:19:56_feec56959afe480e57b2acc177111ae18a5ea757/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_09:19:56_feec56959afe480e57b2acc177111ae18a5ea757/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-05_09:19:56_feec56959afe480e57b2acc177111ae18a5ea757/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index d8812e1467cc880097980222da0d2c3c9f54d8ba..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_09:19:56_feec56959afe480e57b2acc177111ae18a5ea757/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.44255999999996,0.00341,293.0,0.581,172.0 diff --git a/raw_results/2023-09-05_09:19:56_feec56959afe480e57b2acc177111ae18a5ea757/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-05_09:19:56_feec56959afe480e57b2acc177111ae18a5ea757/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index ce7cd826f70ae18d6ca5d9510a178d41c0ea5012..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_09:19:56_feec56959afe480e57b2acc177111ae18a5ea757/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-05 10:50:10,335][benchmark][INFO] - Configuring inference benchmark -[2023-09-05 10:50:10,337][benchmark][INFO] - + Setting seed(42) -[2023-09-05 10:50:11,786][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-05 10:50:11,786][backend][INFO] - Configuring pytorch backend -[2023-09-05 10:50:11,787][backend][INFO] - + Checking initial device isolation -[2023-09-05 10:50:11,787][backend][INFO] - + Checking contineous device isolation -[2023-09-05 10:50:11,787][pytorch][INFO] - + Disabling gradients -[2023-09-05 10:50:11,787][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-05 10:50:12,487][pytorch][INFO] - + Turning on eval mode -[2023-09-05 10:50:12,487][inference][INFO] - Running inference benchmark -[2023-09-05 10:50:12,680][inference][INFO] - + Tracking forward pass peak memory -[2023-09-05 10:50:12,820][inference][INFO] - + Forward pass peak memory: 469.44255999999996 (MB) -[2023-09-05 10:50:12,822][inference][INFO] - + Warming up the 
forward pass -[2023-09-05 10:50:12,856][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-05 10:50:17,910][inference][INFO] - + Forward pass latency: 3.41e-03 (s) -[2023-09-05 10:50:17,912][inference][INFO] - + Forward pass throughput: 293.00 (samples/s) -[2023-09-05 10:50:17,912][inference][INFO] - + Warming up the generation pass -[2023-09-05 10:50:18,519][inference][INFO] - + Tracking generation latency and throughput -[2023-09-05 10:50:23,746][inference][INFO] - + Generation pass latency: 5.81e-01 (s) -[2023-09-05 10:50:23,747][inference][INFO] - + Generation pass throughput: 172.00 (tokens/s) -[2023-09-05 10:50:23,747][inference][INFO] - Saving inference results -[2023-09-05 10:50:23,759][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-05_09:34:28_fbbe1b8a406a09b47673f606f0af6f3d5e045575/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-05_09:34:28_fbbe1b8a406a09b47673f606f0af6f3d5e045575/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index d75847bed7772db4647c7b58e851f1868b48e619..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_09:34:28_fbbe1b8a406a09b47673f606f0af6f3d5e045575/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_09:34:28_fbbe1b8a406a09b47673f606f0af6f3d5e045575/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-05_09:34:28_fbbe1b8a406a09b47673f606f0af6f3d5e045575/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 2f1fde0b241a6650c9d6b8b6556d7ea18ae39c99..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_09:34:28_fbbe1b8a406a09b47673f606f0af6f3d5e045575/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-05_09:34:28_fbbe1b8a406a09b47673f606f0af6f3d5e045575/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-09-05_09:34:28_fbbe1b8a406a09b47673f606f0af6f3d5e045575/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-05_09:34:28_fbbe1b8a406a09b47673f606f0af6f3d5e045575/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_09:34:28_fbbe1b8a406a09b47673f606f0af6f3d5e045575/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-05_09:34:28_fbbe1b8a406a09b47673f606f0af6f3d5e045575/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-05_09:34:28_fbbe1b8a406a09b47673f606f0af6f3d5e045575/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 36a53864c4e26f9e38905bad2498ae02dc1b767f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_09:34:28_fbbe1b8a406a09b47673f606f0af6f3d5e045575/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_09:34:28_fbbe1b8a406a09b47673f606f0af6f3d5e045575/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-05_09:34:28_fbbe1b8a406a09b47673f606f0af6f3d5e045575/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 68697c289c347a0943fabdf87073a401758c2e62..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_09:34:28_fbbe1b8a406a09b47673f606f0af6f3d5e045575/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.73862399999996,0.0033,303.0 diff --git a/raw_results/2023-09-05_09:34:28_fbbe1b8a406a09b47673f606f0af6f3d5e045575/pytorch_bert_inference/0/main.log b/raw_results/2023-09-05_09:34:28_fbbe1b8a406a09b47673f606f0af6f3d5e045575/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
cdc9a3d82bbe0e0e47adfcb9457ff1a8e7a7cc1b..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_09:34:28_fbbe1b8a406a09b47673f606f0af6f3d5e045575/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-05 10:51:33,242][benchmark][INFO] - Configuring inference benchmark -[2023-09-05 10:51:33,243][benchmark][INFO] - + Setting seed(42) -[2023-09-05 10:51:34,682][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-05 10:51:34,682][backend][INFO] - Configuring pytorch backend -[2023-09-05 10:51:34,682][backend][INFO] - + Checking initial device isolation -[2023-09-05 10:51:34,683][backend][INFO] - + Checking contineous device isolation -[2023-09-05 10:51:34,683][pytorch][INFO] - + Disabling gradients -[2023-09-05 10:51:34,683][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-05 10:51:35,312][pytorch][INFO] - + Turning on eval mode -[2023-09-05 10:51:35,313][inference][INFO] - Running inference benchmark -[2023-09-05 10:51:35,454][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 10:51:35,455][inference][INFO] - + Tracking forward pass peak memory -[2023-09-05 10:51:35,619][inference][INFO] - + Forward pass peak memory: 467.73862399999996 (MB) -[2023-09-05 10:51:35,620][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 10:51:35,622][inference][INFO] - + Warming up the forward pass -[2023-09-05 10:51:35,656][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-05 10:51:40,710][inference][INFO] - + Forward pass latency: 3.30e-03 (s) -[2023-09-05 10:51:40,711][inference][INFO] - + Forward pass throughput: 303.00 (samples/s) -[2023-09-05 10:51:40,711][inference][INFO] - Saving inference results -[2023-09-05 10:51:40,722][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-05_09:34:28_fbbe1b8a406a09b47673f606f0af6f3d5e045575/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-05_09:34:28_fbbe1b8a406a09b47673f606f0af6f3d5e045575/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 344ec2f935a31a19a62e706f12f4bb8e09c4c386..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_09:34:28_fbbe1b8a406a09b47673f606f0af6f3d5e045575/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_09:34:28_fbbe1b8a406a09b47673f606f0af6f3d5e045575/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-05_09:34:28_fbbe1b8a406a09b47673f606f0af6f3d5e045575/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 5303c9e75d18850dd7c8162afbc63ccf5e208d6d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_09:34:28_fbbe1b8a406a09b47673f606f0af6f3d5e045575/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-05_09:34:28_fbbe1b8a406a09b47673f606f0af6f3d5e045575/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-05_09:34:28_fbbe1b8a406a09b47673f606f0af6f3d5e045575/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-09-05_09:34:28_fbbe1b8a406a09b47673f606f0af6f3d5e045575/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_09:34:28_fbbe1b8a406a09b47673f606f0af6f3d5e045575/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-05_09:34:28_fbbe1b8a406a09b47673f606f0af6f3d5e045575/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-05_09:34:28_fbbe1b8a406a09b47673f606f0af6f3d5e045575/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 9af0e66383a2862a3d036ea4917e3bacabb881ae..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_09:34:28_fbbe1b8a406a09b47673f606f0af6f3d5e045575/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_09:34:28_fbbe1b8a406a09b47673f606f0af6f3d5e045575/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-05_09:34:28_fbbe1b8a406a09b47673f606f0af6f3d5e045575/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 745ee79d759c85d7a9dd5fbfe1e1318ab6df085f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_09:34:28_fbbe1b8a406a09b47673f606f0af6f3d5e045575/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.74624,0.00366,1090.0 diff --git a/raw_results/2023-09-05_09:34:28_fbbe1b8a406a09b47673f606f0af6f3d5e045575/pytorch_bert_inference/1/main.log b/raw_results/2023-09-05_09:34:28_fbbe1b8a406a09b47673f606f0af6f3d5e045575/pytorch_bert_inference/1/main.log deleted file mode 100644 index 5f50f1742cd51743f5ec3df89ff596325aa28eef..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_09:34:28_fbbe1b8a406a09b47673f606f0af6f3d5e045575/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-05 10:51:41,090][benchmark][INFO] - Configuring inference benchmark -[2023-09-05 10:51:41,090][benchmark][INFO] - + Setting seed(42) -[2023-09-05 10:51:41,527][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-05 10:51:41,527][backend][INFO] - Configuring pytorch backend -[2023-09-05 10:51:41,527][backend][INFO] - + Checking initial device isolation -[2023-09-05 10:51:41,527][backend][INFO] - + Checking contineous device isolation -[2023-09-05 10:51:41,527][pytorch][INFO] - + Disabling gradients -[2023-09-05 10:51:41,527][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-05 10:51:41,641][pytorch][INFO] - + Turning on eval mode -[2023-09-05 10:51:41,641][inference][INFO] - Running inference benchmark -[2023-09-05 10:51:41,761][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 10:51:41,762][inference][INFO] - + Tracking forward pass peak 
memory -[2023-09-05 10:51:41,803][inference][INFO] - + Forward pass peak memory: 468.74624 (MB) -[2023-09-05 10:51:41,804][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 10:51:41,805][inference][INFO] - + Warming up the forward pass -[2023-09-05 10:51:41,842][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-05 10:51:46,889][inference][INFO] - + Forward pass latency: 3.66e-03 (s) -[2023-09-05 10:51:46,890][inference][INFO] - + Forward pass throughput: 1090.00 (samples/s) -[2023-09-05 10:51:46,890][inference][INFO] - Saving inference results -[2023-09-05 10:51:46,898][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-05_09:34:28_fbbe1b8a406a09b47673f606f0af6f3d5e045575/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-05_09:34:28_fbbe1b8a406a09b47673f606f0af6f3d5e045575/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 7e32a35f367f532a16d65c94e459c4f30c9188c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_09:34:28_fbbe1b8a406a09b47673f606f0af6f3d5e045575/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_09:34:28_fbbe1b8a406a09b47673f606f0af6f3d5e045575/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-05_09:34:28_fbbe1b8a406a09b47673f606f0af6f3d5e045575/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index e2e4ef3f863900095e4ebbebe2aa62358c1045d2..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_09:34:28_fbbe1b8a406a09b47673f606f0af6f3d5e045575/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-05_09:34:28_fbbe1b8a406a09b47673f606f0af6f3d5e045575/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-05_09:34:28_fbbe1b8a406a09b47673f606f0af6f3d5e045575/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-09-05_09:34:28_fbbe1b8a406a09b47673f606f0af6f3d5e045575/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_09:34:28_fbbe1b8a406a09b47673f606f0af6f3d5e045575/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-05_09:34:28_fbbe1b8a406a09b47673f606f0af6f3d5e045575/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-05_09:34:28_fbbe1b8a406a09b47673f606f0af6f3d5e045575/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 5cf5f1ff54ffd6523449b409066f7697ce8772db..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_09:34:28_fbbe1b8a406a09b47673f606f0af6f3d5e045575/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_09:34:28_fbbe1b8a406a09b47673f606f0af6f3d5e045575/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-05_09:34:28_fbbe1b8a406a09b47673f606f0af6f3d5e045575/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 0a8d4a18e457f3458c22557b8b4d5970c61f0c32..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_09:34:28_fbbe1b8a406a09b47673f606f0af6f3d5e045575/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.516288,0.00331,302.0,0.484,207.0 diff --git a/raw_results/2023-09-05_09:34:28_fbbe1b8a406a09b47673f606f0af6f3d5e045575/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-05_09:34:28_fbbe1b8a406a09b47673f606f0af6f3d5e045575/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 0d2eac68200d61efe45f1fe2ea93057f09458a63..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-09-05_09:34:28_fbbe1b8a406a09b47673f606f0af6f3d5e045575/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-05 10:51:51,670][benchmark][INFO] - Configuring inference benchmark -[2023-09-05 10:51:51,671][benchmark][INFO] - + Setting seed(42) -[2023-09-05 10:51:53,221][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-05 10:51:53,221][backend][INFO] - Configuring pytorch backend -[2023-09-05 10:51:53,221][backend][INFO] - + Checking initial device isolation -[2023-09-05 10:51:53,222][backend][INFO] - + Checking contineous device isolation -[2023-09-05 10:51:53,222][pytorch][INFO] - + Disabling gradients -[2023-09-05 10:51:53,222][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-05 10:51:53,970][pytorch][INFO] - + Turning on eval mode -[2023-09-05 10:51:53,971][inference][INFO] - Running inference benchmark -[2023-09-05 10:51:54,167][inference][INFO] - + Tracking forward pass peak memory -[2023-09-05 10:51:54,324][inference][INFO] - + Forward pass peak memory: 469.516288 (MB) -[2023-09-05 10:51:54,326][inference][INFO] - + Warming up the forward pass -[2023-09-05 10:51:54,359][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-05 10:51:59,414][inference][INFO] - + Forward pass latency: 3.31e-03 (s) -[2023-09-05 10:51:59,415][inference][INFO] - + Forward pass throughput: 302.00 (samples/s) -[2023-09-05 10:51:59,416][inference][INFO] - + Warming up the generation pass -[2023-09-05 10:51:59,910][inference][INFO] - + Tracking generation latency and throughput -[2023-09-05 10:52:05,233][inference][INFO] - + Generation pass latency: 4.84e-01 (s) -[2023-09-05 10:52:05,234][inference][INFO] - + Generation pass throughput: 207.00 (tokens/s) -[2023-09-05 10:52:05,234][inference][INFO] - Saving inference results -[2023-09-05 10:52:05,246][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-05_09:37:54_1cc3bc22fed6ffc5937cf66c799dd97840622e69/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-05_09:37:54_1cc3bc22fed6ffc5937cf66c799dd97840622e69/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index d75847bed7772db4647c7b58e851f1868b48e619..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_09:37:54_1cc3bc22fed6ffc5937cf66c799dd97840622e69/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_09:37:54_1cc3bc22fed6ffc5937cf66c799dd97840622e69/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-05_09:37:54_1cc3bc22fed6ffc5937cf66c799dd97840622e69/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index e41dee84a5e552af52724504119ddf186ac8e7f6..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_09:37:54_1cc3bc22fed6ffc5937cf66c799dd97840622e69/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-05_09:37:54_1cc3bc22fed6ffc5937cf66c799dd97840622e69/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-05_09:37:54_1cc3bc22fed6ffc5937cf66c799dd97840622e69/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-05_09:37:54_1cc3bc22fed6ffc5937cf66c799dd97840622e69/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_09:37:54_1cc3bc22fed6ffc5937cf66c799dd97840622e69/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-05_09:37:54_1cc3bc22fed6ffc5937cf66c799dd97840622e69/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-05_09:37:54_1cc3bc22fed6ffc5937cf66c799dd97840622e69/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 36a53864c4e26f9e38905bad2498ae02dc1b767f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_09:37:54_1cc3bc22fed6ffc5937cf66c799dd97840622e69/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_09:37:54_1cc3bc22fed6ffc5937cf66c799dd97840622e69/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-05_09:37:54_1cc3bc22fed6ffc5937cf66c799dd97840622e69/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index c36f634156d09d040e4fcd1ac962d1f3f9040714..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_09:37:54_1cc3bc22fed6ffc5937cf66c799dd97840622e69/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.845696,0.00339,295.0 diff --git a/raw_results/2023-09-05_09:37:54_1cc3bc22fed6ffc5937cf66c799dd97840622e69/pytorch_bert_inference/0/main.log b/raw_results/2023-09-05_09:37:54_1cc3bc22fed6ffc5937cf66c799dd97840622e69/pytorch_bert_inference/0/main.log deleted file mode 100644 index 4b48fd24e99578850a13f42b041d646ecbcad274..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_09:37:54_1cc3bc22fed6ffc5937cf66c799dd97840622e69/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-05 10:53:14,579][benchmark][INFO] - Configuring inference benchmark -[2023-09-05 10:53:14,580][benchmark][INFO] - + Setting seed(42) -[2023-09-05 10:53:15,789][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-05 10:53:15,789][backend][INFO] - Configuring pytorch backend -[2023-09-05 10:53:15,789][backend][INFO] - + Checking initial device isolation -[2023-09-05 10:53:15,789][backend][INFO] - + Checking contineous device isolation -[2023-09-05 10:53:15,790][pytorch][INFO] - + Disabling gradients -[2023-09-05 10:53:15,790][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-05 10:53:16,388][pytorch][INFO] - + Turning on eval mode -[2023-09-05 10:53:16,389][inference][INFO] - Running inference benchmark -[2023-09-05 10:53:16,512][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 10:53:16,514][inference][INFO] - + Tracking forward pass peak 
memory -[2023-09-05 10:53:16,724][inference][INFO] - + Forward pass peak memory: 466.845696 (MB) -[2023-09-05 10:53:16,725][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 10:53:16,727][inference][INFO] - + Warming up the forward pass -[2023-09-05 10:53:16,761][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-05 10:53:21,813][inference][INFO] - + Forward pass latency: 3.39e-03 (s) -[2023-09-05 10:53:21,814][inference][INFO] - + Forward pass throughput: 295.00 (samples/s) -[2023-09-05 10:53:21,815][inference][INFO] - Saving inference results -[2023-09-05 10:53:21,826][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-05_09:37:54_1cc3bc22fed6ffc5937cf66c799dd97840622e69/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-05_09:37:54_1cc3bc22fed6ffc5937cf66c799dd97840622e69/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 344ec2f935a31a19a62e706f12f4bb8e09c4c386..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_09:37:54_1cc3bc22fed6ffc5937cf66c799dd97840622e69/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_09:37:54_1cc3bc22fed6ffc5937cf66c799dd97840622e69/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-05_09:37:54_1cc3bc22fed6ffc5937cf66c799dd97840622e69/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 56bf58e638bf0e066a9d9cfa616cbef7b7415844..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_09:37:54_1cc3bc22fed6ffc5937cf66c799dd97840622e69/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-05_09:37:54_1cc3bc22fed6ffc5937cf66c799dd97840622e69/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-05_09:37:54_1cc3bc22fed6ffc5937cf66c799dd97840622e69/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-09-05_09:37:54_1cc3bc22fed6ffc5937cf66c799dd97840622e69/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_09:37:54_1cc3bc22fed6ffc5937cf66c799dd97840622e69/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-05_09:37:54_1cc3bc22fed6ffc5937cf66c799dd97840622e69/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-05_09:37:54_1cc3bc22fed6ffc5937cf66c799dd97840622e69/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 9af0e66383a2862a3d036ea4917e3bacabb881ae..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_09:37:54_1cc3bc22fed6ffc5937cf66c799dd97840622e69/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_09:37:54_1cc3bc22fed6ffc5937cf66c799dd97840622e69/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-05_09:37:54_1cc3bc22fed6ffc5937cf66c799dd97840622e69/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 079b20cf55f24be9a0ca6669516a80f9a12acb63..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_09:37:54_1cc3bc22fed6ffc5937cf66c799dd97840622e69/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.836928,0.00469,853.0 diff --git a/raw_results/2023-09-05_09:37:54_1cc3bc22fed6ffc5937cf66c799dd97840622e69/pytorch_bert_inference/1/main.log b/raw_results/2023-09-05_09:37:54_1cc3bc22fed6ffc5937cf66c799dd97840622e69/pytorch_bert_inference/1/main.log deleted file mode 100644 index e9073be87ed113ca369e13f0de731e22b268951b..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-09-05_09:37:54_1cc3bc22fed6ffc5937cf66c799dd97840622e69/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-05 10:53:22,192][benchmark][INFO] - Configuring inference benchmark -[2023-09-05 10:53:22,193][benchmark][INFO] - + Setting seed(42) -[2023-09-05 10:53:22,625][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-05 10:53:22,626][backend][INFO] - Configuring pytorch backend -[2023-09-05 10:53:22,626][backend][INFO] - + Checking initial device isolation -[2023-09-05 10:53:22,626][backend][INFO] - + Checking contineous device isolation -[2023-09-05 10:53:22,626][pytorch][INFO] - + Disabling gradients -[2023-09-05 10:53:22,626][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-05 10:53:22,750][pytorch][INFO] - + Turning on eval mode -[2023-09-05 10:53:22,751][inference][INFO] - Running inference benchmark -[2023-09-05 10:53:22,871][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 10:53:22,873][inference][INFO] - + Tracking forward pass peak memory -[2023-09-05 10:53:22,915][inference][INFO] - + Forward pass peak memory: 467.836928 (MB) -[2023-09-05 10:53:22,915][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 10:53:22,917][inference][INFO] - + Warming up the forward pass -[2023-09-05 10:53:22,965][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-05 10:53:28,005][inference][INFO] - + Forward pass latency: 4.69e-03 (s) -[2023-09-05 10:53:28,006][inference][INFO] - + Forward pass throughput: 853.00 (samples/s) -[2023-09-05 10:53:28,006][inference][INFO] - Saving inference results -[2023-09-05 10:53:28,016][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-05_09:37:54_1cc3bc22fed6ffc5937cf66c799dd97840622e69/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-05_09:37:54_1cc3bc22fed6ffc5937cf66c799dd97840622e69/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 7e32a35f367f532a16d65c94e459c4f30c9188c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_09:37:54_1cc3bc22fed6ffc5937cf66c799dd97840622e69/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: 
hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_09:37:54_1cc3bc22fed6ffc5937cf66c799dd97840622e69/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-05_09:37:54_1cc3bc22fed6ffc5937cf66c799dd97840622e69/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 0995cfd5e27fbdbab791ba7b2f14f37e3a2a8ebe..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_09:37:54_1cc3bc22fed6ffc5937cf66c799dd97840622e69/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-05_09:37:54_1cc3bc22fed6ffc5937cf66c799dd97840622e69/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-05_09:37:54_1cc3bc22fed6ffc5937cf66c799dd97840622e69/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-09-05_09:37:54_1cc3bc22fed6ffc5937cf66c799dd97840622e69/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_09:37:54_1cc3bc22fed6ffc5937cf66c799dd97840622e69/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-05_09:37:54_1cc3bc22fed6ffc5937cf66c799dd97840622e69/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-05_09:37:54_1cc3bc22fed6ffc5937cf66c799dd97840622e69/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 5cf5f1ff54ffd6523449b409066f7697ce8772db..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_09:37:54_1cc3bc22fed6ffc5937cf66c799dd97840622e69/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_09:37:54_1cc3bc22fed6ffc5937cf66c799dd97840622e69/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-05_09:37:54_1cc3bc22fed6ffc5937cf66c799dd97840622e69/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index d486ccd2e4248149c7c6bb8bdb2fb3e5006458b7..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_09:37:54_1cc3bc22fed6ffc5937cf66c799dd97840622e69/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.684224,0.00343,292.0,0.514,195.0 diff --git a/raw_results/2023-09-05_09:37:54_1cc3bc22fed6ffc5937cf66c799dd97840622e69/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-05_09:37:54_1cc3bc22fed6ffc5937cf66c799dd97840622e69/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 80703d1d73e7084b10de8e8e7e8cf31e692bb960..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_09:37:54_1cc3bc22fed6ffc5937cf66c799dd97840622e69/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-05 10:53:32,870][benchmark][INFO] - Configuring inference benchmark -[2023-09-05 10:53:32,871][benchmark][INFO] - + Setting seed(42) -[2023-09-05 10:53:34,399][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-05 10:53:34,399][backend][INFO] - Configuring pytorch backend -[2023-09-05 10:53:34,399][backend][INFO] - + Checking initial device isolation -[2023-09-05 10:53:34,400][backend][INFO] - + Checking contineous device isolation -[2023-09-05 10:53:34,400][pytorch][INFO] - + Disabling gradients -[2023-09-05 10:53:34,400][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-05 10:53:35,050][pytorch][INFO] - + Turning on eval mode -[2023-09-05 10:53:35,050][inference][INFO] - Running inference benchmark -[2023-09-05 10:53:35,241][inference][INFO] - + Tracking forward pass peak memory -[2023-09-05 10:53:35,414][inference][INFO] - + Forward pass peak memory: 469.684224 (MB) -[2023-09-05 10:53:35,416][inference][INFO] - + Warming up the forward pass 
-[2023-09-05 10:53:35,451][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-05 10:53:40,500][inference][INFO] - + Forward pass latency: 3.43e-03 (s) -[2023-09-05 10:53:40,502][inference][INFO] - + Forward pass throughput: 292.00 (samples/s) -[2023-09-05 10:53:40,502][inference][INFO] - + Warming up the generation pass -[2023-09-05 10:53:41,050][inference][INFO] - + Tracking generation latency and throughput -[2023-09-05 10:53:46,186][inference][INFO] - + Generation pass latency: 5.14e-01 (s) -[2023-09-05 10:53:46,187][inference][INFO] - + Generation pass throughput: 195.00 (tokens/s) -[2023-09-05 10:53:46,187][inference][INFO] - Saving inference results -[2023-09-05 10:53:46,198][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-05_10:07:02_52a46dc57bb653aa9dab440e4bb70988b15cdc7e/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-05_10:07:02_52a46dc57bb653aa9dab440e4bb70988b15cdc7e/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index d75847bed7772db4647c7b58e851f1868b48e619..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_10:07:02_52a46dc57bb653aa9dab440e4bb70988b15cdc7e/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_10:07:02_52a46dc57bb653aa9dab440e4bb70988b15cdc7e/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-05_10:07:02_52a46dc57bb653aa9dab440e4bb70988b15cdc7e/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 35bbb6fa25c05dfc9149808342a2237164fcc65c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_10:07:02_52a46dc57bb653aa9dab440e4bb70988b15cdc7e/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-05_10:07:02_52a46dc57bb653aa9dab440e4bb70988b15cdc7e/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-09-05_10:07:02_52a46dc57bb653aa9dab440e4bb70988b15cdc7e/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-05_10:07:02_52a46dc57bb653aa9dab440e4bb70988b15cdc7e/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_10:07:02_52a46dc57bb653aa9dab440e4bb70988b15cdc7e/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-05_10:07:02_52a46dc57bb653aa9dab440e4bb70988b15cdc7e/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-05_10:07:02_52a46dc57bb653aa9dab440e4bb70988b15cdc7e/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 36a53864c4e26f9e38905bad2498ae02dc1b767f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_10:07:02_52a46dc57bb653aa9dab440e4bb70988b15cdc7e/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_10:07:02_52a46dc57bb653aa9dab440e4bb70988b15cdc7e/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-05_10:07:02_52a46dc57bb653aa9dab440e4bb70988b15cdc7e/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index e27f7d3116f54e01abf35f10dbccdd2c3632a975..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_10:07:02_52a46dc57bb653aa9dab440e4bb70988b15cdc7e/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.522112,0.00328,305.0 diff --git a/raw_results/2023-09-05_10:07:02_52a46dc57bb653aa9dab440e4bb70988b15cdc7e/pytorch_bert_inference/0/main.log b/raw_results/2023-09-05_10:07:02_52a46dc57bb653aa9dab440e4bb70988b15cdc7e/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
276d9585af382a1b599938231a7308eb504f6d97..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_10:07:02_52a46dc57bb653aa9dab440e4bb70988b15cdc7e/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-05 10:54:55,622][benchmark][INFO] - Configuring inference benchmark -[2023-09-05 10:54:55,622][benchmark][INFO] - + Setting seed(42) -[2023-09-05 10:54:56,814][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-05 10:54:56,814][backend][INFO] - Configuring pytorch backend -[2023-09-05 10:54:56,814][backend][INFO] - + Checking initial device isolation -[2023-09-05 10:54:56,814][backend][INFO] - + Checking contineous device isolation -[2023-09-05 10:54:56,815][pytorch][INFO] - + Disabling gradients -[2023-09-05 10:54:56,815][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-05 10:54:57,549][pytorch][INFO] - + Turning on eval mode -[2023-09-05 10:54:57,549][inference][INFO] - Running inference benchmark -[2023-09-05 10:54:57,671][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 10:54:57,673][inference][INFO] - + Tracking forward pass peak memory -[2023-09-05 10:54:57,854][inference][INFO] - + Forward pass peak memory: 466.522112 (MB) -[2023-09-05 10:54:57,856][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 10:54:57,857][inference][INFO] - + Warming up the forward pass -[2023-09-05 10:54:57,892][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-05 10:55:02,948][inference][INFO] - + Forward pass latency: 3.28e-03 (s) -[2023-09-05 10:55:02,949][inference][INFO] - + Forward pass throughput: 305.00 (samples/s) -[2023-09-05 10:55:02,949][inference][INFO] - Saving inference results -[2023-09-05 10:55:02,960][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-05_10:07:02_52a46dc57bb653aa9dab440e4bb70988b15cdc7e/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-05_10:07:02_52a46dc57bb653aa9dab440e4bb70988b15cdc7e/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 344ec2f935a31a19a62e706f12f4bb8e09c4c386..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_10:07:02_52a46dc57bb653aa9dab440e4bb70988b15cdc7e/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_10:07:02_52a46dc57bb653aa9dab440e4bb70988b15cdc7e/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-05_10:07:02_52a46dc57bb653aa9dab440e4bb70988b15cdc7e/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index e768f088e1f6201fbbd4819ff835b1f23756a10c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_10:07:02_52a46dc57bb653aa9dab440e4bb70988b15cdc7e/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-05_10:07:02_52a46dc57bb653aa9dab440e4bb70988b15cdc7e/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-05_10:07:02_52a46dc57bb653aa9dab440e4bb70988b15cdc7e/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-09-05_10:07:02_52a46dc57bb653aa9dab440e4bb70988b15cdc7e/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_10:07:02_52a46dc57bb653aa9dab440e4bb70988b15cdc7e/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-05_10:07:02_52a46dc57bb653aa9dab440e4bb70988b15cdc7e/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-05_10:07:02_52a46dc57bb653aa9dab440e4bb70988b15cdc7e/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 9af0e66383a2862a3d036ea4917e3bacabb881ae..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_10:07:02_52a46dc57bb653aa9dab440e4bb70988b15cdc7e/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_10:07:02_52a46dc57bb653aa9dab440e4bb70988b15cdc7e/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-05_10:07:02_52a46dc57bb653aa9dab440e4bb70988b15cdc7e/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index e774d56d58906061868d05c478195229dbe30b6a..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_10:07:02_52a46dc57bb653aa9dab440e4bb70988b15cdc7e/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.550208,0.00458,873.0 diff --git a/raw_results/2023-09-05_10:07:02_52a46dc57bb653aa9dab440e4bb70988b15cdc7e/pytorch_bert_inference/1/main.log b/raw_results/2023-09-05_10:07:02_52a46dc57bb653aa9dab440e4bb70988b15cdc7e/pytorch_bert_inference/1/main.log deleted file mode 100644 index 19768464ac845ac66bc92401bf878d44bc81fa24..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_10:07:02_52a46dc57bb653aa9dab440e4bb70988b15cdc7e/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-05 10:55:03,326][benchmark][INFO] - Configuring inference benchmark -[2023-09-05 10:55:03,326][benchmark][INFO] - + Setting seed(42) -[2023-09-05 10:55:03,759][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-05 10:55:03,759][backend][INFO] - Configuring pytorch backend -[2023-09-05 10:55:03,759][backend][INFO] - + Checking initial device isolation -[2023-09-05 10:55:03,759][backend][INFO] - + Checking contineous device isolation -[2023-09-05 10:55:03,759][pytorch][INFO] - + Disabling gradients -[2023-09-05 10:55:03,760][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-05 10:55:03,870][pytorch][INFO] - + Turning on eval mode -[2023-09-05 10:55:03,871][inference][INFO] - Running inference benchmark -[2023-09-05 10:55:03,996][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 10:55:03,998][inference][INFO] - + Tracking forward pass peak 
memory -[2023-09-05 10:55:04,040][inference][INFO] - + Forward pass peak memory: 467.550208 (MB) -[2023-09-05 10:55:04,041][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 10:55:04,043][inference][INFO] - + Warming up the forward pass -[2023-09-05 10:55:04,089][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-05 10:55:09,132][inference][INFO] - + Forward pass latency: 4.58e-03 (s) -[2023-09-05 10:55:09,133][inference][INFO] - + Forward pass throughput: 873.00 (samples/s) -[2023-09-05 10:55:09,133][inference][INFO] - Saving inference results -[2023-09-05 10:55:09,142][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-05_10:07:02_52a46dc57bb653aa9dab440e4bb70988b15cdc7e/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-05_10:07:02_52a46dc57bb653aa9dab440e4bb70988b15cdc7e/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 7e32a35f367f532a16d65c94e459c4f30c9188c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_10:07:02_52a46dc57bb653aa9dab440e4bb70988b15cdc7e/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_10:07:02_52a46dc57bb653aa9dab440e4bb70988b15cdc7e/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-05_10:07:02_52a46dc57bb653aa9dab440e4bb70988b15cdc7e/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index c1b30823dc193f022830323ab027851b02417675..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_10:07:02_52a46dc57bb653aa9dab440e4bb70988b15cdc7e/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-05_10:07:02_52a46dc57bb653aa9dab440e4bb70988b15cdc7e/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-05_10:07:02_52a46dc57bb653aa9dab440e4bb70988b15cdc7e/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-09-05_10:07:02_52a46dc57bb653aa9dab440e4bb70988b15cdc7e/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_10:07:02_52a46dc57bb653aa9dab440e4bb70988b15cdc7e/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-05_10:07:02_52a46dc57bb653aa9dab440e4bb70988b15cdc7e/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-05_10:07:02_52a46dc57bb653aa9dab440e4bb70988b15cdc7e/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 5cf5f1ff54ffd6523449b409066f7697ce8772db..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_10:07:02_52a46dc57bb653aa9dab440e4bb70988b15cdc7e/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_10:07:02_52a46dc57bb653aa9dab440e4bb70988b15cdc7e/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-05_10:07:02_52a46dc57bb653aa9dab440e4bb70988b15cdc7e/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 5113531a5c91b5d3be2fb2069ce19fb81859657c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_10:07:02_52a46dc57bb653aa9dab440e4bb70988b15cdc7e/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.45484799999997,0.00327,306.0,0.495,202.0 diff --git a/raw_results/2023-09-05_10:07:02_52a46dc57bb653aa9dab440e4bb70988b15cdc7e/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-05_10:07:02_52a46dc57bb653aa9dab440e4bb70988b15cdc7e/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 8d8365bf9417b58976c929ec978495807ae877f5..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-09-05_10:07:02_52a46dc57bb653aa9dab440e4bb70988b15cdc7e/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-05 10:55:13,935][benchmark][INFO] - Configuring inference benchmark -[2023-09-05 10:55:13,936][benchmark][INFO] - + Setting seed(42) -[2023-09-05 10:55:15,351][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-05 10:55:15,351][backend][INFO] - Configuring pytorch backend -[2023-09-05 10:55:15,351][backend][INFO] - + Checking initial device isolation -[2023-09-05 10:55:15,351][backend][INFO] - + Checking contineous device isolation -[2023-09-05 10:55:15,351][pytorch][INFO] - + Disabling gradients -[2023-09-05 10:55:15,351][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-05 10:55:15,990][pytorch][INFO] - + Turning on eval mode -[2023-09-05 10:55:15,990][inference][INFO] - Running inference benchmark -[2023-09-05 10:55:16,209][inference][INFO] - + Tracking forward pass peak memory -[2023-09-05 10:55:16,341][inference][INFO] - + Forward pass peak memory: 469.45484799999997 (MB) -[2023-09-05 10:55:16,342][inference][INFO] - + Warming up the forward pass -[2023-09-05 10:55:16,381][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-05 10:55:21,433][inference][INFO] - + Forward pass latency: 3.27e-03 (s) -[2023-09-05 10:55:21,434][inference][INFO] - + Forward pass throughput: 306.00 (samples/s) -[2023-09-05 10:55:21,435][inference][INFO] - + Warming up the generation pass -[2023-09-05 10:55:21,937][inference][INFO] - + Tracking generation latency and throughput -[2023-09-05 10:55:27,385][inference][INFO] - + Generation pass latency: 4.95e-01 (s) -[2023-09-05 10:55:27,386][inference][INFO] - + Generation pass throughput: 202.00 (tokens/s) -[2023-09-05 10:55:27,386][inference][INFO] - Saving inference results -[2023-09-05 10:55:27,397][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-05_10:13:06_6f125aaa4807d84e9004ce79035c7653aedfd630/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-05_10:13:06_6f125aaa4807d84e9004ce79035c7653aedfd630/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index d75847bed7772db4647c7b58e851f1868b48e619..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_10:13:06_6f125aaa4807d84e9004ce79035c7653aedfd630/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_10:13:06_6f125aaa4807d84e9004ce79035c7653aedfd630/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-05_10:13:06_6f125aaa4807d84e9004ce79035c7653aedfd630/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 94b8e545cab20715460a1850cd366d2e6c815ef9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_10:13:06_6f125aaa4807d84e9004ce79035c7653aedfd630/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-05_10:13:06_6f125aaa4807d84e9004ce79035c7653aedfd630/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-05_10:13:06_6f125aaa4807d84e9004ce79035c7653aedfd630/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-05_10:13:06_6f125aaa4807d84e9004ce79035c7653aedfd630/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_10:13:06_6f125aaa4807d84e9004ce79035c7653aedfd630/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-05_10:13:06_6f125aaa4807d84e9004ce79035c7653aedfd630/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-05_10:13:06_6f125aaa4807d84e9004ce79035c7653aedfd630/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 36a53864c4e26f9e38905bad2498ae02dc1b767f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_10:13:06_6f125aaa4807d84e9004ce79035c7653aedfd630/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_10:13:06_6f125aaa4807d84e9004ce79035c7653aedfd630/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-05_10:13:06_6f125aaa4807d84e9004ce79035c7653aedfd630/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 2a15eed3b49e27ec9f6f957bc0ae78fd6dd54d0b..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_10:13:06_6f125aaa4807d84e9004ce79035c7653aedfd630/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.86208,0.00289,346.0 diff --git a/raw_results/2023-09-05_10:13:06_6f125aaa4807d84e9004ce79035c7653aedfd630/pytorch_bert_inference/0/main.log b/raw_results/2023-09-05_10:13:06_6f125aaa4807d84e9004ce79035c7653aedfd630/pytorch_bert_inference/0/main.log deleted file mode 100644 index b3aae28b0826eab36e1e62caadde15b069c6878b..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_10:13:06_6f125aaa4807d84e9004ce79035c7653aedfd630/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-05 10:56:36,235][benchmark][INFO] - Configuring inference benchmark -[2023-09-05 10:56:36,236][benchmark][INFO] - + Setting seed(42) -[2023-09-05 10:56:37,443][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-05 10:56:37,443][backend][INFO] - Configuring pytorch backend -[2023-09-05 10:56:37,444][backend][INFO] - + Checking initial device isolation -[2023-09-05 10:56:37,444][backend][INFO] - + Checking contineous device isolation -[2023-09-05 10:56:37,444][pytorch][INFO] - + Disabling gradients -[2023-09-05 10:56:37,444][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-05 10:56:38,164][pytorch][INFO] - + Turning on eval mode -[2023-09-05 10:56:38,165][inference][INFO] - Running inference benchmark -[2023-09-05 10:56:38,284][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 10:56:38,286][inference][INFO] - + Tracking forward pass peak 
memory -[2023-09-05 10:56:38,539][inference][INFO] - + Forward pass peak memory: 466.86208 (MB) -[2023-09-05 10:56:38,540][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 10:56:38,542][inference][INFO] - + Warming up the forward pass -[2023-09-05 10:56:38,579][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-05 10:56:43,631][inference][INFO] - + Forward pass latency: 2.89e-03 (s) -[2023-09-05 10:56:43,633][inference][INFO] - + Forward pass throughput: 346.00 (samples/s) -[2023-09-05 10:56:43,633][inference][INFO] - Saving inference results -[2023-09-05 10:56:43,645][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-05_10:13:06_6f125aaa4807d84e9004ce79035c7653aedfd630/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-05_10:13:06_6f125aaa4807d84e9004ce79035c7653aedfd630/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 344ec2f935a31a19a62e706f12f4bb8e09c4c386..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_10:13:06_6f125aaa4807d84e9004ce79035c7653aedfd630/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_10:13:06_6f125aaa4807d84e9004ce79035c7653aedfd630/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-05_10:13:06_6f125aaa4807d84e9004ce79035c7653aedfd630/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 868d6050fb42826f4ab108caa8084962600ff29c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_10:13:06_6f125aaa4807d84e9004ce79035c7653aedfd630/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-05_10:13:06_6f125aaa4807d84e9004ce79035c7653aedfd630/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-05_10:13:06_6f125aaa4807d84e9004ce79035c7653aedfd630/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-09-05_10:13:06_6f125aaa4807d84e9004ce79035c7653aedfd630/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_10:13:06_6f125aaa4807d84e9004ce79035c7653aedfd630/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-05_10:13:06_6f125aaa4807d84e9004ce79035c7653aedfd630/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-05_10:13:06_6f125aaa4807d84e9004ce79035c7653aedfd630/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 9af0e66383a2862a3d036ea4917e3bacabb881ae..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_10:13:06_6f125aaa4807d84e9004ce79035c7653aedfd630/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_10:13:06_6f125aaa4807d84e9004ce79035c7653aedfd630/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-05_10:13:06_6f125aaa4807d84e9004ce79035c7653aedfd630/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index b4cc28a9be1b9acf37f5053d55e196281cf805be..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_10:13:06_6f125aaa4807d84e9004ce79035c7653aedfd630/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.96390399999996,0.00324,1230.0 diff --git a/raw_results/2023-09-05_10:13:06_6f125aaa4807d84e9004ce79035c7653aedfd630/pytorch_bert_inference/1/main.log b/raw_results/2023-09-05_10:13:06_6f125aaa4807d84e9004ce79035c7653aedfd630/pytorch_bert_inference/1/main.log deleted file mode 100644 index 8ad0089d1e54478f714a414d121d1c2923f7567b..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-09-05_10:13:06_6f125aaa4807d84e9004ce79035c7653aedfd630/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-05 10:56:44,097][benchmark][INFO] - Configuring inference benchmark -[2023-09-05 10:56:44,098][benchmark][INFO] - + Setting seed(42) -[2023-09-05 10:56:44,536][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-05 10:56:44,536][backend][INFO] - Configuring pytorch backend -[2023-09-05 10:56:44,536][backend][INFO] - + Checking initial device isolation -[2023-09-05 10:56:44,536][backend][INFO] - + Checking contineous device isolation -[2023-09-05 10:56:44,536][pytorch][INFO] - + Disabling gradients -[2023-09-05 10:56:44,537][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-05 10:56:44,651][pytorch][INFO] - + Turning on eval mode -[2023-09-05 10:56:44,652][inference][INFO] - Running inference benchmark -[2023-09-05 10:56:44,778][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 10:56:44,779][inference][INFO] - + Tracking forward pass peak memory -[2023-09-05 10:56:44,821][inference][INFO] - + Forward pass peak memory: 467.96390399999996 (MB) -[2023-09-05 10:56:44,822][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 10:56:44,823][inference][INFO] - + Warming up the forward pass -[2023-09-05 10:56:44,861][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-05 10:56:49,909][inference][INFO] - + Forward pass latency: 3.24e-03 (s) -[2023-09-05 10:56:49,911][inference][INFO] - + Forward pass throughput: 1230.00 (samples/s) -[2023-09-05 10:56:49,911][inference][INFO] - Saving inference results -[2023-09-05 10:56:49,919][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-05_10:13:06_6f125aaa4807d84e9004ce79035c7653aedfd630/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-05_10:13:06_6f125aaa4807d84e9004ce79035c7653aedfd630/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 7e32a35f367f532a16d65c94e459c4f30c9188c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_10:13:06_6f125aaa4807d84e9004ce79035c7653aedfd630/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference 
-model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_10:13:06_6f125aaa4807d84e9004ce79035c7653aedfd630/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-05_10:13:06_6f125aaa4807d84e9004ce79035c7653aedfd630/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index e191727d4e52959d3548ef75323d550cb07323e9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_10:13:06_6f125aaa4807d84e9004ce79035c7653aedfd630/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-05_10:13:06_6f125aaa4807d84e9004ce79035c7653aedfd630/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-05_10:13:06_6f125aaa4807d84e9004ce79035c7653aedfd630/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-09-05_10:13:06_6f125aaa4807d84e9004ce79035c7653aedfd630/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_10:13:06_6f125aaa4807d84e9004ce79035c7653aedfd630/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-05_10:13:06_6f125aaa4807d84e9004ce79035c7653aedfd630/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-05_10:13:06_6f125aaa4807d84e9004ce79035c7653aedfd630/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 5cf5f1ff54ffd6523449b409066f7697ce8772db..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_10:13:06_6f125aaa4807d84e9004ce79035c7653aedfd630/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_10:13:06_6f125aaa4807d84e9004ce79035c7653aedfd630/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-05_10:13:06_6f125aaa4807d84e9004ce79035c7653aedfd630/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index ba30ecc02c6994f882494bab70fcafb9f77e94ec..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_10:13:06_6f125aaa4807d84e9004ce79035c7653aedfd630/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.651456,0.00345,290.0,0.538,186.0 diff --git a/raw_results/2023-09-05_10:13:06_6f125aaa4807d84e9004ce79035c7653aedfd630/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-05_10:13:06_6f125aaa4807d84e9004ce79035c7653aedfd630/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index a77018985c4579b81c2c46316ac7f2d26556eff5..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_10:13:06_6f125aaa4807d84e9004ce79035c7653aedfd630/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-05 10:56:54,993][benchmark][INFO] - Configuring inference benchmark -[2023-09-05 10:56:54,994][benchmark][INFO] - + Setting seed(42) -[2023-09-05 10:56:56,536][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-05 10:56:56,536][backend][INFO] - Configuring pytorch backend -[2023-09-05 10:56:56,536][backend][INFO] - + Checking initial device isolation -[2023-09-05 10:56:56,536][backend][INFO] - + Checking contineous device isolation -[2023-09-05 10:56:56,537][pytorch][INFO] - + Disabling gradients -[2023-09-05 10:56:56,537][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-05 10:56:57,269][pytorch][INFO] - + Turning on eval mode -[2023-09-05 10:56:57,270][inference][INFO] - Running inference benchmark -[2023-09-05 10:56:57,467][inference][INFO] - + Tracking forward pass peak memory -[2023-09-05 10:56:57,673][inference][INFO] - + Forward pass peak memory: 469.651456 (MB) -[2023-09-05 10:56:57,675][inference][INFO] - + Warming up the forward pass 
-[2023-09-05 10:56:57,717][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-05 10:57:02,768][inference][INFO] - + Forward pass latency: 3.45e-03 (s) -[2023-09-05 10:57:02,769][inference][INFO] - + Forward pass throughput: 290.00 (samples/s) -[2023-09-05 10:57:02,769][inference][INFO] - + Warming up the generation pass -[2023-09-05 10:57:03,285][inference][INFO] - + Tracking generation latency and throughput -[2023-09-05 10:57:08,667][inference][INFO] - + Generation pass latency: 5.38e-01 (s) -[2023-09-05 10:57:08,668][inference][INFO] - + Generation pass throughput: 186.00 (tokens/s) -[2023-09-05 10:57:08,668][inference][INFO] - Saving inference results -[2023-09-05 10:57:08,679][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-05_11:04:49_391f26459ab1a392aedc82e0546ce5f88acb7cd5/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-05_11:04:49_391f26459ab1a392aedc82e0546ce5f88acb7cd5/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index d75847bed7772db4647c7b58e851f1868b48e619..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:04:49_391f26459ab1a392aedc82e0546ce5f88acb7cd5/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_11:04:49_391f26459ab1a392aedc82e0546ce5f88acb7cd5/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-05_11:04:49_391f26459ab1a392aedc82e0546ce5f88acb7cd5/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 1fb9a4d8df66260e57e764bebde8c848554903dd..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:04:49_391f26459ab1a392aedc82e0546ce5f88acb7cd5/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-05_11:04:49_391f26459ab1a392aedc82e0546ce5f88acb7cd5/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-09-05_11:04:49_391f26459ab1a392aedc82e0546ce5f88acb7cd5/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-05_11:04:49_391f26459ab1a392aedc82e0546ce5f88acb7cd5/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:04:49_391f26459ab1a392aedc82e0546ce5f88acb7cd5/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-05_11:04:49_391f26459ab1a392aedc82e0546ce5f88acb7cd5/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-05_11:04:49_391f26459ab1a392aedc82e0546ce5f88acb7cd5/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 36a53864c4e26f9e38905bad2498ae02dc1b767f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:04:49_391f26459ab1a392aedc82e0546ce5f88acb7cd5/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_11:04:49_391f26459ab1a392aedc82e0546ce5f88acb7cd5/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-05_11:04:49_391f26459ab1a392aedc82e0546ce5f88acb7cd5/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 64b04775d60137101e00effc4374d7642ab13dab..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:04:49_391f26459ab1a392aedc82e0546ce5f88acb7cd5/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.358272,0.00304,329.0 diff --git a/raw_results/2023-09-05_11:04:49_391f26459ab1a392aedc82e0546ce5f88acb7cd5/pytorch_bert_inference/0/main.log b/raw_results/2023-09-05_11:04:49_391f26459ab1a392aedc82e0546ce5f88acb7cd5/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
ef025e4df7c050daf33c7afd50add4ed9237daf3..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:04:49_391f26459ab1a392aedc82e0546ce5f88acb7cd5/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-05 13:00:51,848][benchmark][INFO] - Configuring inference benchmark -[2023-09-05 13:00:51,849][benchmark][INFO] - + Setting seed(42) -[2023-09-05 13:00:53,144][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-05 13:00:53,144][backend][INFO] - Configuring pytorch backend -[2023-09-05 13:00:53,144][backend][INFO] - + Checking initial device isolation -[2023-09-05 13:00:53,145][backend][INFO] - + Checking contineous device isolation -[2023-09-05 13:00:53,145][pytorch][INFO] - + Disabling gradients -[2023-09-05 13:00:53,145][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-05 13:00:53,900][pytorch][INFO] - + Turning on eval mode -[2023-09-05 13:00:53,901][inference][INFO] - Running inference benchmark -[2023-09-05 13:00:54,025][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 13:00:54,026][inference][INFO] - + Tracking forward pass peak memory -[2023-09-05 13:00:54,195][inference][INFO] - + Forward pass peak memory: 466.358272 (MB) -[2023-09-05 13:00:54,196][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 13:00:54,198][inference][INFO] - + Warming up the forward pass -[2023-09-05 13:00:54,238][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-05 13:00:59,289][inference][INFO] - + Forward pass latency: 3.04e-03 (s) -[2023-09-05 13:00:59,291][inference][INFO] - + Forward pass throughput: 329.00 (samples/s) -[2023-09-05 13:00:59,292][inference][INFO] - Saving inference results -[2023-09-05 13:00:59,304][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-05_11:04:49_391f26459ab1a392aedc82e0546ce5f88acb7cd5/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-05_11:04:49_391f26459ab1a392aedc82e0546ce5f88acb7cd5/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 344ec2f935a31a19a62e706f12f4bb8e09c4c386..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:04:49_391f26459ab1a392aedc82e0546ce5f88acb7cd5/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_11:04:49_391f26459ab1a392aedc82e0546ce5f88acb7cd5/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-05_11:04:49_391f26459ab1a392aedc82e0546ce5f88acb7cd5/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index ccc34eca620df5cb0198a805dea2e61cdca0853b..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:04:49_391f26459ab1a392aedc82e0546ce5f88acb7cd5/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-05_11:04:49_391f26459ab1a392aedc82e0546ce5f88acb7cd5/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-05_11:04:49_391f26459ab1a392aedc82e0546ce5f88acb7cd5/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-09-05_11:04:49_391f26459ab1a392aedc82e0546ce5f88acb7cd5/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:04:49_391f26459ab1a392aedc82e0546ce5f88acb7cd5/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-05_11:04:49_391f26459ab1a392aedc82e0546ce5f88acb7cd5/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-05_11:04:49_391f26459ab1a392aedc82e0546ce5f88acb7cd5/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 9af0e66383a2862a3d036ea4917e3bacabb881ae..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:04:49_391f26459ab1a392aedc82e0546ce5f88acb7cd5/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_11:04:49_391f26459ab1a392aedc82e0546ce5f88acb7cd5/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-05_11:04:49_391f26459ab1a392aedc82e0546ce5f88acb7cd5/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index ab9642ead329219adef7db80b0a7ffcecb7a3113..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:04:49_391f26459ab1a392aedc82e0546ce5f88acb7cd5/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.41504,0.00334,1200.0 diff --git a/raw_results/2023-09-05_11:04:49_391f26459ab1a392aedc82e0546ce5f88acb7cd5/pytorch_bert_inference/1/main.log b/raw_results/2023-09-05_11:04:49_391f26459ab1a392aedc82e0546ce5f88acb7cd5/pytorch_bert_inference/1/main.log deleted file mode 100644 index 4bf5e1de7de43cccd2a0afeaddc0e1add9fbecf6..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:04:49_391f26459ab1a392aedc82e0546ce5f88acb7cd5/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-05 13:00:59,711][benchmark][INFO] - Configuring inference benchmark -[2023-09-05 13:00:59,712][benchmark][INFO] - + Setting seed(42) -[2023-09-05 13:01:00,147][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-05 13:01:00,148][backend][INFO] - Configuring pytorch backend -[2023-09-05 13:01:00,148][backend][INFO] - + Checking initial device isolation -[2023-09-05 13:01:00,148][backend][INFO] - + Checking contineous device isolation -[2023-09-05 13:01:00,148][pytorch][INFO] - + Disabling gradients -[2023-09-05 13:01:00,148][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-05 13:01:00,264][pytorch][INFO] - + Turning on eval mode -[2023-09-05 13:01:00,265][inference][INFO] - Running inference benchmark -[2023-09-05 13:01:00,382][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 13:01:00,384][inference][INFO] - + Tracking forward pass peak 
memory -[2023-09-05 13:01:00,430][inference][INFO] - + Forward pass peak memory: 467.41504 (MB) -[2023-09-05 13:01:00,431][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 13:01:00,433][inference][INFO] - + Warming up the forward pass -[2023-09-05 13:01:00,476][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-05 13:01:05,524][inference][INFO] - + Forward pass latency: 3.34e-03 (s) -[2023-09-05 13:01:05,525][inference][INFO] - + Forward pass throughput: 1200.00 (samples/s) -[2023-09-05 13:01:05,526][inference][INFO] - Saving inference results -[2023-09-05 13:01:05,533][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-05_11:04:49_391f26459ab1a392aedc82e0546ce5f88acb7cd5/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-05_11:04:49_391f26459ab1a392aedc82e0546ce5f88acb7cd5/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 7e32a35f367f532a16d65c94e459c4f30c9188c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:04:49_391f26459ab1a392aedc82e0546ce5f88acb7cd5/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_11:04:49_391f26459ab1a392aedc82e0546ce5f88acb7cd5/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-05_11:04:49_391f26459ab1a392aedc82e0546ce5f88acb7cd5/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index ddf34688aebecc563ca357b49b2575d60aa04a24..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:04:49_391f26459ab1a392aedc82e0546ce5f88acb7cd5/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-05_11:04:49_391f26459ab1a392aedc82e0546ce5f88acb7cd5/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-05_11:04:49_391f26459ab1a392aedc82e0546ce5f88acb7cd5/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-09-05_11:04:49_391f26459ab1a392aedc82e0546ce5f88acb7cd5/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:04:49_391f26459ab1a392aedc82e0546ce5f88acb7cd5/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-05_11:04:49_391f26459ab1a392aedc82e0546ce5f88acb7cd5/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-05_11:04:49_391f26459ab1a392aedc82e0546ce5f88acb7cd5/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 5cf5f1ff54ffd6523449b409066f7697ce8772db..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:04:49_391f26459ab1a392aedc82e0546ce5f88acb7cd5/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_11:04:49_391f26459ab1a392aedc82e0546ce5f88acb7cd5/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-05_11:04:49_391f26459ab1a392aedc82e0546ce5f88acb7cd5/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 58e4369ad0782220fe9fb04fbba5d29a53aa18ef..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:04:49_391f26459ab1a392aedc82e0546ce5f88acb7cd5/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.62687999999997,0.00343,292.0,0.588,170.0 diff --git a/raw_results/2023-09-05_11:04:49_391f26459ab1a392aedc82e0546ce5f88acb7cd5/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-05_11:04:49_391f26459ab1a392aedc82e0546ce5f88acb7cd5/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 398a18a47dda02752618d9fd5859942ff77b7f2b..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-09-05_11:04:49_391f26459ab1a392aedc82e0546ce5f88acb7cd5/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-05 13:01:10,605][benchmark][INFO] - Configuring inference benchmark -[2023-09-05 13:01:10,606][benchmark][INFO] - + Setting seed(42) -[2023-09-05 13:01:12,379][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-05 13:01:12,379][backend][INFO] - Configuring pytorch backend -[2023-09-05 13:01:12,379][backend][INFO] - + Checking initial device isolation -[2023-09-05 13:01:12,380][backend][INFO] - + Checking contineous device isolation -[2023-09-05 13:01:12,380][pytorch][INFO] - + Disabling gradients -[2023-09-05 13:01:12,380][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-05 13:01:13,130][pytorch][INFO] - + Turning on eval mode -[2023-09-05 13:01:13,130][inference][INFO] - Running inference benchmark -[2023-09-05 13:01:13,326][inference][INFO] - + Tracking forward pass peak memory -[2023-09-05 13:01:13,466][inference][INFO] - + Forward pass peak memory: 469.62687999999997 (MB) -[2023-09-05 13:01:13,467][inference][INFO] - + Warming up the forward pass -[2023-09-05 13:01:13,505][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-05 13:01:18,556][inference][INFO] - + Forward pass latency: 3.43e-03 (s) -[2023-09-05 13:01:18,558][inference][INFO] - + Forward pass throughput: 292.00 (samples/s) -[2023-09-05 13:01:18,558][inference][INFO] - + Warming up the generation pass -[2023-09-05 13:01:19,067][inference][INFO] - + Tracking generation latency and throughput -[2023-09-05 13:01:24,359][inference][INFO] - + Generation pass latency: 5.88e-01 (s) -[2023-09-05 13:01:24,360][inference][INFO] - + Generation pass throughput: 170.00 (tokens/s) -[2023-09-05 13:01:24,360][inference][INFO] - Saving inference results -[2023-09-05 13:01:24,370][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-05_11:27:20_6316ce8d2703f210b91853aba90d44755a241334/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-05_11:27:20_6316ce8d2703f210b91853aba90d44755a241334/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index d75847bed7772db4647c7b58e851f1868b48e619..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:27:20_6316ce8d2703f210b91853aba90d44755a241334/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_11:27:20_6316ce8d2703f210b91853aba90d44755a241334/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-05_11:27:20_6316ce8d2703f210b91853aba90d44755a241334/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 1e24973e66f711a3016960e8f488b3cad9e3a0ca..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:27:20_6316ce8d2703f210b91853aba90d44755a241334/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-05_11:27:20_6316ce8d2703f210b91853aba90d44755a241334/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-05_11:27:20_6316ce8d2703f210b91853aba90d44755a241334/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-05_11:27:20_6316ce8d2703f210b91853aba90d44755a241334/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:27:20_6316ce8d2703f210b91853aba90d44755a241334/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-05_11:27:20_6316ce8d2703f210b91853aba90d44755a241334/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-05_11:27:20_6316ce8d2703f210b91853aba90d44755a241334/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 36a53864c4e26f9e38905bad2498ae02dc1b767f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:27:20_6316ce8d2703f210b91853aba90d44755a241334/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_11:27:20_6316ce8d2703f210b91853aba90d44755a241334/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-05_11:27:20_6316ce8d2703f210b91853aba90d44755a241334/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index b95ffd61537ed3b7693c8b91f47e312b41337d37..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:27:20_6316ce8d2703f210b91853aba90d44755a241334/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.18624,0.0035,286.0 diff --git a/raw_results/2023-09-05_11:27:20_6316ce8d2703f210b91853aba90d44755a241334/pytorch_bert_inference/0/main.log b/raw_results/2023-09-05_11:27:20_6316ce8d2703f210b91853aba90d44755a241334/pytorch_bert_inference/0/main.log deleted file mode 100644 index 68ef671ccd176f28addfce6087403d1324633a9a..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:27:20_6316ce8d2703f210b91853aba90d44755a241334/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-05 13:02:33,854][benchmark][INFO] - Configuring inference benchmark -[2023-09-05 13:02:33,855][benchmark][INFO] - + Setting seed(42) -[2023-09-05 13:02:35,087][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-05 13:02:35,087][backend][INFO] - Configuring pytorch backend -[2023-09-05 13:02:35,088][backend][INFO] - + Checking initial device isolation -[2023-09-05 13:02:35,088][backend][INFO] - + Checking contineous device isolation -[2023-09-05 13:02:35,088][pytorch][INFO] - + Disabling gradients -[2023-09-05 13:02:35,088][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-05 13:02:35,709][pytorch][INFO] - + Turning on eval mode -[2023-09-05 13:02:35,709][inference][INFO] - Running inference benchmark -[2023-09-05 13:02:35,834][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 13:02:35,835][inference][INFO] - + Tracking forward pass peak 
memory -[2023-09-05 13:02:35,999][inference][INFO] - + Forward pass peak memory: 466.18624 (MB) -[2023-09-05 13:02:36,001][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 13:02:36,002][inference][INFO] - + Warming up the forward pass -[2023-09-05 13:02:36,046][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-05 13:02:41,101][inference][INFO] - + Forward pass latency: 3.50e-03 (s) -[2023-09-05 13:02:41,103][inference][INFO] - + Forward pass throughput: 286.00 (samples/s) -[2023-09-05 13:02:41,103][inference][INFO] - Saving inference results -[2023-09-05 13:02:41,115][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-05_11:27:20_6316ce8d2703f210b91853aba90d44755a241334/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-05_11:27:20_6316ce8d2703f210b91853aba90d44755a241334/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 344ec2f935a31a19a62e706f12f4bb8e09c4c386..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:27:20_6316ce8d2703f210b91853aba90d44755a241334/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_11:27:20_6316ce8d2703f210b91853aba90d44755a241334/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-05_11:27:20_6316ce8d2703f210b91853aba90d44755a241334/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 3c5e94be4cf96b537d8b2ec4ae0578f2d8bf6cd9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:27:20_6316ce8d2703f210b91853aba90d44755a241334/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-05_11:27:20_6316ce8d2703f210b91853aba90d44755a241334/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-05_11:27:20_6316ce8d2703f210b91853aba90d44755a241334/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-09-05_11:27:20_6316ce8d2703f210b91853aba90d44755a241334/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:27:20_6316ce8d2703f210b91853aba90d44755a241334/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-05_11:27:20_6316ce8d2703f210b91853aba90d44755a241334/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-05_11:27:20_6316ce8d2703f210b91853aba90d44755a241334/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 9af0e66383a2862a3d036ea4917e3bacabb881ae..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:27:20_6316ce8d2703f210b91853aba90d44755a241334/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_11:27:20_6316ce8d2703f210b91853aba90d44755a241334/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-05_11:27:20_6316ce8d2703f210b91853aba90d44755a241334/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index f685c248c45acc75e820749300752ab4246dcf18..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:27:20_6316ce8d2703f210b91853aba90d44755a241334/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.197952,0.0036,1110.0 diff --git a/raw_results/2023-09-05_11:27:20_6316ce8d2703f210b91853aba90d44755a241334/pytorch_bert_inference/1/main.log b/raw_results/2023-09-05_11:27:20_6316ce8d2703f210b91853aba90d44755a241334/pytorch_bert_inference/1/main.log deleted file mode 100644 index 15474be53d6a80b8a21ff301fe6dc16c9d8aacb1..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-09-05_11:27:20_6316ce8d2703f210b91853aba90d44755a241334/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-05 13:02:41,486][benchmark][INFO] - Configuring inference benchmark -[2023-09-05 13:02:41,487][benchmark][INFO] - + Setting seed(42) -[2023-09-05 13:02:41,935][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-05 13:02:41,935][backend][INFO] - Configuring pytorch backend -[2023-09-05 13:02:41,936][backend][INFO] - + Checking initial device isolation -[2023-09-05 13:02:41,936][backend][INFO] - + Checking contineous device isolation -[2023-09-05 13:02:41,936][pytorch][INFO] - + Disabling gradients -[2023-09-05 13:02:41,936][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-05 13:02:42,063][pytorch][INFO] - + Turning on eval mode -[2023-09-05 13:02:42,064][inference][INFO] - Running inference benchmark -[2023-09-05 13:02:42,186][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 13:02:42,188][inference][INFO] - + Tracking forward pass peak memory -[2023-09-05 13:02:42,229][inference][INFO] - + Forward pass peak memory: 467.197952 (MB) -[2023-09-05 13:02:42,230][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 13:02:42,231][inference][INFO] - + Warming up the forward pass -[2023-09-05 13:02:42,268][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-05 13:02:47,319][inference][INFO] - + Forward pass latency: 3.60e-03 (s) -[2023-09-05 13:02:47,320][inference][INFO] - + Forward pass throughput: 1110.00 (samples/s) -[2023-09-05 13:02:47,320][inference][INFO] - Saving inference results -[2023-09-05 13:02:47,329][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-05_11:27:20_6316ce8d2703f210b91853aba90d44755a241334/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-05_11:27:20_6316ce8d2703f210b91853aba90d44755a241334/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 7e32a35f367f532a16d65c94e459c4f30c9188c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:27:20_6316ce8d2703f210b91853aba90d44755a241334/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: 
hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_11:27:20_6316ce8d2703f210b91853aba90d44755a241334/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-05_11:27:20_6316ce8d2703f210b91853aba90d44755a241334/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 5515b76aa413c87f5abb6c28aa2a2c406e7694c3..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:27:20_6316ce8d2703f210b91853aba90d44755a241334/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-05_11:27:20_6316ce8d2703f210b91853aba90d44755a241334/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-05_11:27:20_6316ce8d2703f210b91853aba90d44755a241334/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-09-05_11:27:20_6316ce8d2703f210b91853aba90d44755a241334/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:27:20_6316ce8d2703f210b91853aba90d44755a241334/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-05_11:27:20_6316ce8d2703f210b91853aba90d44755a241334/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-05_11:27:20_6316ce8d2703f210b91853aba90d44755a241334/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 5cf5f1ff54ffd6523449b409066f7697ce8772db..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:27:20_6316ce8d2703f210b91853aba90d44755a241334/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_11:27:20_6316ce8d2703f210b91853aba90d44755a241334/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-05_11:27:20_6316ce8d2703f210b91853aba90d44755a241334/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index a03067579046d6ff1af333332f0665345203a6c9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:27:20_6316ce8d2703f210b91853aba90d44755a241334/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.44255999999996,0.00327,306.0,0.568,176.0 diff --git a/raw_results/2023-09-05_11:27:20_6316ce8d2703f210b91853aba90d44755a241334/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-05_11:27:20_6316ce8d2703f210b91853aba90d44755a241334/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index d1dd0599ef67dbfd3d4fd417255f34abef5b89af..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:27:20_6316ce8d2703f210b91853aba90d44755a241334/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-05 13:02:52,161][benchmark][INFO] - Configuring inference benchmark -[2023-09-05 13:02:52,161][benchmark][INFO] - + Setting seed(42) -[2023-09-05 13:02:53,613][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-05 13:02:53,614][backend][INFO] - Configuring pytorch backend -[2023-09-05 13:02:53,614][backend][INFO] - + Checking initial device isolation -[2023-09-05 13:02:53,614][backend][INFO] - + Checking contineous device isolation -[2023-09-05 13:02:53,614][pytorch][INFO] - + Disabling gradients -[2023-09-05 13:02:53,614][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-05 13:02:54,280][pytorch][INFO] - + Turning on eval mode -[2023-09-05 13:02:54,281][inference][INFO] - Running inference benchmark -[2023-09-05 13:02:54,493][inference][INFO] - + Tracking forward pass peak memory -[2023-09-05 13:02:54,671][inference][INFO] - + Forward pass peak memory: 469.44255999999996 (MB) -[2023-09-05 13:02:54,673][inference][INFO] - + Warming up the 
forward pass -[2023-09-05 13:02:54,706][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-05 13:02:59,760][inference][INFO] - + Forward pass latency: 3.27e-03 (s) -[2023-09-05 13:02:59,762][inference][INFO] - + Forward pass throughput: 306.00 (samples/s) -[2023-09-05 13:02:59,762][inference][INFO] - + Warming up the generation pass -[2023-09-05 13:03:00,276][inference][INFO] - + Tracking generation latency and throughput -[2023-09-05 13:03:05,387][inference][INFO] - + Generation pass latency: 5.68e-01 (s) -[2023-09-05 13:03:05,387][inference][INFO] - + Generation pass throughput: 176.00 (tokens/s) -[2023-09-05 13:03:05,388][inference][INFO] - Saving inference results -[2023-09-05 13:03:05,400][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-05_11:31:59_7011cd8667d7a51bd608e6a722f061d5ac5f4166/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-05_11:31:59_7011cd8667d7a51bd608e6a722f061d5ac5f4166/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index d75847bed7772db4647c7b58e851f1868b48e619..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:31:59_7011cd8667d7a51bd608e6a722f061d5ac5f4166/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_11:31:59_7011cd8667d7a51bd608e6a722f061d5ac5f4166/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-05_11:31:59_7011cd8667d7a51bd608e6a722f061d5ac5f4166/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index e34826f3ed42a40d44e6763206e75e8b5644b98b..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:31:59_7011cd8667d7a51bd608e6a722f061d5ac5f4166/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-05_11:31:59_7011cd8667d7a51bd608e6a722f061d5ac5f4166/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-09-05_11:31:59_7011cd8667d7a51bd608e6a722f061d5ac5f4166/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-05_11:31:59_7011cd8667d7a51bd608e6a722f061d5ac5f4166/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:31:59_7011cd8667d7a51bd608e6a722f061d5ac5f4166/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-05_11:31:59_7011cd8667d7a51bd608e6a722f061d5ac5f4166/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-05_11:31:59_7011cd8667d7a51bd608e6a722f061d5ac5f4166/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 36a53864c4e26f9e38905bad2498ae02dc1b767f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:31:59_7011cd8667d7a51bd608e6a722f061d5ac5f4166/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_11:31:59_7011cd8667d7a51bd608e6a722f061d5ac5f4166/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-05_11:31:59_7011cd8667d7a51bd608e6a722f061d5ac5f4166/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index df913385860b4d0d6b2e5216e315a02122d6f9a4..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:31:59_7011cd8667d7a51bd608e6a722f061d5ac5f4166/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.706432,0.00333,300.0 diff --git a/raw_results/2023-09-05_11:31:59_7011cd8667d7a51bd608e6a722f061d5ac5f4166/pytorch_bert_inference/0/main.log b/raw_results/2023-09-05_11:31:59_7011cd8667d7a51bd608e6a722f061d5ac5f4166/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
8cbe9c41f79bff9ff463bf05bb0ebdda256a837f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:31:59_7011cd8667d7a51bd608e6a722f061d5ac5f4166/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-05 13:04:14,607][benchmark][INFO] - Configuring inference benchmark -[2023-09-05 13:04:14,608][benchmark][INFO] - + Setting seed(42) -[2023-09-05 13:04:15,861][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-05 13:04:15,862][backend][INFO] - Configuring pytorch backend -[2023-09-05 13:04:15,862][backend][INFO] - + Checking initial device isolation -[2023-09-05 13:04:15,862][backend][INFO] - + Checking contineous device isolation -[2023-09-05 13:04:15,862][pytorch][INFO] - + Disabling gradients -[2023-09-05 13:04:15,863][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-05 13:04:16,481][pytorch][INFO] - + Turning on eval mode -[2023-09-05 13:04:16,481][inference][INFO] - Running inference benchmark -[2023-09-05 13:04:16,606][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 13:04:16,608][inference][INFO] - + Tracking forward pass peak memory -[2023-09-05 13:04:16,775][inference][INFO] - + Forward pass peak memory: 466.706432 (MB) -[2023-09-05 13:04:16,777][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 13:04:16,778][inference][INFO] - + Warming up the forward pass -[2023-09-05 13:04:16,812][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-05 13:04:21,866][inference][INFO] - + Forward pass latency: 3.33e-03 (s) -[2023-09-05 13:04:21,868][inference][INFO] - + Forward pass throughput: 300.00 (samples/s) -[2023-09-05 13:04:21,868][inference][INFO] - Saving inference results -[2023-09-05 13:04:21,879][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-05_11:31:59_7011cd8667d7a51bd608e6a722f061d5ac5f4166/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-05_11:31:59_7011cd8667d7a51bd608e6a722f061d5ac5f4166/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 344ec2f935a31a19a62e706f12f4bb8e09c4c386..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:31:59_7011cd8667d7a51bd608e6a722f061d5ac5f4166/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_11:31:59_7011cd8667d7a51bd608e6a722f061d5ac5f4166/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-05_11:31:59_7011cd8667d7a51bd608e6a722f061d5ac5f4166/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index c658bbace725cbd34b6da6a0908767dae10134d9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:31:59_7011cd8667d7a51bd608e6a722f061d5ac5f4166/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-05_11:31:59_7011cd8667d7a51bd608e6a722f061d5ac5f4166/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-05_11:31:59_7011cd8667d7a51bd608e6a722f061d5ac5f4166/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-09-05_11:31:59_7011cd8667d7a51bd608e6a722f061d5ac5f4166/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:31:59_7011cd8667d7a51bd608e6a722f061d5ac5f4166/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-05_11:31:59_7011cd8667d7a51bd608e6a722f061d5ac5f4166/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-05_11:31:59_7011cd8667d7a51bd608e6a722f061d5ac5f4166/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 9af0e66383a2862a3d036ea4917e3bacabb881ae..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:31:59_7011cd8667d7a51bd608e6a722f061d5ac5f4166/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_11:31:59_7011cd8667d7a51bd608e6a722f061d5ac5f4166/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-05_11:31:59_7011cd8667d7a51bd608e6a722f061d5ac5f4166/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 514c4144f991a49b5a651736898e540eef51be44..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:31:59_7011cd8667d7a51bd608e6a722f061d5ac5f4166/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.714048,0.00459,871.0 diff --git a/raw_results/2023-09-05_11:31:59_7011cd8667d7a51bd608e6a722f061d5ac5f4166/pytorch_bert_inference/1/main.log b/raw_results/2023-09-05_11:31:59_7011cd8667d7a51bd608e6a722f061d5ac5f4166/pytorch_bert_inference/1/main.log deleted file mode 100644 index 20cb5534842b0880fff90f86d08946cb767a9b22..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:31:59_7011cd8667d7a51bd608e6a722f061d5ac5f4166/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-05 13:04:22,244][benchmark][INFO] - Configuring inference benchmark -[2023-09-05 13:04:22,245][benchmark][INFO] - + Setting seed(42) -[2023-09-05 13:04:22,689][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-05 13:04:22,690][backend][INFO] - Configuring pytorch backend -[2023-09-05 13:04:22,690][backend][INFO] - + Checking initial device isolation -[2023-09-05 13:04:22,690][backend][INFO] - + Checking contineous device isolation -[2023-09-05 13:04:22,690][pytorch][INFO] - + Disabling gradients -[2023-09-05 13:04:22,690][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-05 13:04:22,809][pytorch][INFO] - + Turning on eval mode -[2023-09-05 13:04:22,809][inference][INFO] - Running inference benchmark -[2023-09-05 13:04:22,939][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 13:04:22,940][inference][INFO] - + Tracking forward pass peak 
memory -[2023-09-05 13:04:22,986][inference][INFO] - + Forward pass peak memory: 467.714048 (MB) -[2023-09-05 13:04:22,986][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 13:04:22,988][inference][INFO] - + Warming up the forward pass -[2023-09-05 13:04:23,034][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-05 13:04:28,074][inference][INFO] - + Forward pass latency: 4.59e-03 (s) -[2023-09-05 13:04:28,075][inference][INFO] - + Forward pass throughput: 871.00 (samples/s) -[2023-09-05 13:04:28,075][inference][INFO] - Saving inference results -[2023-09-05 13:04:28,083][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-05_11:31:59_7011cd8667d7a51bd608e6a722f061d5ac5f4166/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-05_11:31:59_7011cd8667d7a51bd608e6a722f061d5ac5f4166/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 7e32a35f367f532a16d65c94e459c4f30c9188c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:31:59_7011cd8667d7a51bd608e6a722f061d5ac5f4166/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_11:31:59_7011cd8667d7a51bd608e6a722f061d5ac5f4166/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-05_11:31:59_7011cd8667d7a51bd608e6a722f061d5ac5f4166/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index d60e8ff0ac3f117866852a6daccc96c8a0ecf4f1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:31:59_7011cd8667d7a51bd608e6a722f061d5ac5f4166/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-05_11:31:59_7011cd8667d7a51bd608e6a722f061d5ac5f4166/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-05_11:31:59_7011cd8667d7a51bd608e6a722f061d5ac5f4166/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-09-05_11:31:59_7011cd8667d7a51bd608e6a722f061d5ac5f4166/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:31:59_7011cd8667d7a51bd608e6a722f061d5ac5f4166/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-05_11:31:59_7011cd8667d7a51bd608e6a722f061d5ac5f4166/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-05_11:31:59_7011cd8667d7a51bd608e6a722f061d5ac5f4166/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 5cf5f1ff54ffd6523449b409066f7697ce8772db..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:31:59_7011cd8667d7a51bd608e6a722f061d5ac5f4166/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_11:31:59_7011cd8667d7a51bd608e6a722f061d5ac5f4166/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-05_11:31:59_7011cd8667d7a51bd608e6a722f061d5ac5f4166/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index c77ffb2679d1a6128bb59e3dc25b64adb7b0cc21..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:31:59_7011cd8667d7a51bd608e6a722f061d5ac5f4166/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.50809599999997,0.00349,287.0,0.482,207.0 diff --git a/raw_results/2023-09-05_11:31:59_7011cd8667d7a51bd608e6a722f061d5ac5f4166/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-05_11:31:59_7011cd8667d7a51bd608e6a722f061d5ac5f4166/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 595131ed645cd530eaff8689b103d947d0a9459b..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-09-05_11:31:59_7011cd8667d7a51bd608e6a722f061d5ac5f4166/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-05 13:04:32,937][benchmark][INFO] - Configuring inference benchmark -[2023-09-05 13:04:32,937][benchmark][INFO] - + Setting seed(42) -[2023-09-05 13:04:34,389][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-05 13:04:34,389][backend][INFO] - Configuring pytorch backend -[2023-09-05 13:04:34,390][backend][INFO] - + Checking initial device isolation -[2023-09-05 13:04:34,390][backend][INFO] - + Checking contineous device isolation -[2023-09-05 13:04:34,390][pytorch][INFO] - + Disabling gradients -[2023-09-05 13:04:34,390][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-05 13:04:35,047][pytorch][INFO] - + Turning on eval mode -[2023-09-05 13:04:35,048][inference][INFO] - Running inference benchmark -[2023-09-05 13:04:35,249][inference][INFO] - + Tracking forward pass peak memory -[2023-09-05 13:04:35,414][inference][INFO] - + Forward pass peak memory: 469.50809599999997 (MB) -[2023-09-05 13:04:35,416][inference][INFO] - + Warming up the forward pass -[2023-09-05 13:04:35,451][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-05 13:04:40,502][inference][INFO] - + Forward pass latency: 3.49e-03 (s) -[2023-09-05 13:04:40,503][inference][INFO] - + Forward pass throughput: 287.00 (samples/s) -[2023-09-05 13:04:40,504][inference][INFO] - + Warming up the generation pass -[2023-09-05 13:04:40,995][inference][INFO] - + Tracking generation latency and throughput -[2023-09-05 13:04:46,303][inference][INFO] - + Generation pass latency: 4.82e-01 (s) -[2023-09-05 13:04:46,304][inference][INFO] - + Generation pass throughput: 207.00 (tokens/s) -[2023-09-05 13:04:46,304][inference][INFO] - Saving inference results -[2023-09-05 13:04:46,315][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-05_11:43:51_aea761499f4b1193f2706f471442da6f9df65d65/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-05_11:43:51_aea761499f4b1193f2706f471442da6f9df65d65/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index d75847bed7772db4647c7b58e851f1868b48e619..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:43:51_aea761499f4b1193f2706f471442da6f9df65d65/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_11:43:51_aea761499f4b1193f2706f471442da6f9df65d65/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-05_11:43:51_aea761499f4b1193f2706f471442da6f9df65d65/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 05ab7e6a736ad0eb9a40beb79b61e0054b2f3c45..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:43:51_aea761499f4b1193f2706f471442da6f9df65d65/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-05_11:43:51_aea761499f4b1193f2706f471442da6f9df65d65/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-05_11:43:51_aea761499f4b1193f2706f471442da6f9df65d65/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-05_11:43:51_aea761499f4b1193f2706f471442da6f9df65d65/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:43:51_aea761499f4b1193f2706f471442da6f9df65d65/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-05_11:43:51_aea761499f4b1193f2706f471442da6f9df65d65/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-05_11:43:51_aea761499f4b1193f2706f471442da6f9df65d65/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 36a53864c4e26f9e38905bad2498ae02dc1b767f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:43:51_aea761499f4b1193f2706f471442da6f9df65d65/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_11:43:51_aea761499f4b1193f2706f471442da6f9df65d65/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-05_11:43:51_aea761499f4b1193f2706f471442da6f9df65d65/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 91dbc6ab457172e6a17119dbe7232bbf92301829..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:43:51_aea761499f4b1193f2706f471442da6f9df65d65/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.88255999999996,0.00351,285.0 diff --git a/raw_results/2023-09-05_11:43:51_aea761499f4b1193f2706f471442da6f9df65d65/pytorch_bert_inference/0/main.log b/raw_results/2023-09-05_11:43:51_aea761499f4b1193f2706f471442da6f9df65d65/pytorch_bert_inference/0/main.log deleted file mode 100644 index 006013c47849f6982b68cfe70aa120f092b00eea..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:43:51_aea761499f4b1193f2706f471442da6f9df65d65/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-05 13:05:57,545][benchmark][INFO] - Configuring inference benchmark -[2023-09-05 13:05:57,546][benchmark][INFO] - + Setting seed(42) -[2023-09-05 13:05:58,764][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-05 13:05:58,764][backend][INFO] - Configuring pytorch backend -[2023-09-05 13:05:58,765][backend][INFO] - + Checking initial device isolation -[2023-09-05 13:05:58,765][backend][INFO] - + Checking contineous device isolation -[2023-09-05 13:05:58,765][pytorch][INFO] - + Disabling gradients -[2023-09-05 13:05:58,765][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-05 13:05:59,416][pytorch][INFO] - + Turning on eval mode -[2023-09-05 13:05:59,417][inference][INFO] - Running inference benchmark -[2023-09-05 13:05:59,544][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 13:05:59,546][inference][INFO] - + Tracking forward 
pass peak memory -[2023-09-05 13:05:59,642][inference][INFO] - + Forward pass peak memory: 466.88255999999996 (MB) -[2023-09-05 13:05:59,643][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 13:05:59,645][inference][INFO] - + Warming up the forward pass -[2023-09-05 13:05:59,700][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-05 13:06:04,751][inference][INFO] - + Forward pass latency: 3.51e-03 (s) -[2023-09-05 13:06:04,753][inference][INFO] - + Forward pass throughput: 285.00 (samples/s) -[2023-09-05 13:06:04,753][inference][INFO] - Saving inference results -[2023-09-05 13:06:04,762][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-05_11:43:51_aea761499f4b1193f2706f471442da6f9df65d65/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-05_11:43:51_aea761499f4b1193f2706f471442da6f9df65d65/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 344ec2f935a31a19a62e706f12f4bb8e09c4c386..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:43:51_aea761499f4b1193f2706f471442da6f9df65d65/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_11:43:51_aea761499f4b1193f2706f471442da6f9df65d65/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-05_11:43:51_aea761499f4b1193f2706f471442da6f9df65d65/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 80e4cc0fffa729082248b9f2a37648dc1b180508..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:43:51_aea761499f4b1193f2706f471442da6f9df65d65/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} 
- launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-05_11:43:51_aea761499f4b1193f2706f471442da6f9df65d65/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-09-05_11:43:51_aea761499f4b1193f2706f471442da6f9df65d65/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-09-05_11:43:51_aea761499f4b1193f2706f471442da6f9df65d65/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:43:51_aea761499f4b1193f2706f471442da6f9df65d65/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-05_11:43:51_aea761499f4b1193f2706f471442da6f9df65d65/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-05_11:43:51_aea761499f4b1193f2706f471442da6f9df65d65/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 9af0e66383a2862a3d036ea4917e3bacabb881ae..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:43:51_aea761499f4b1193f2706f471442da6f9df65d65/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_11:43:51_aea761499f4b1193f2706f471442da6f9df65d65/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-05_11:43:51_aea761499f4b1193f2706f471442da6f9df65d65/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 7798379f02f8ed89ea6b38d7bb36c8e565f608b5..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:43:51_aea761499f4b1193f2706f471442da6f9df65d65/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.78777599999995,0.00385,1040.0 diff --git a/raw_results/2023-09-05_11:43:51_aea761499f4b1193f2706f471442da6f9df65d65/pytorch_bert_inference/1/main.log b/raw_results/2023-09-05_11:43:51_aea761499f4b1193f2706f471442da6f9df65d65/pytorch_bert_inference/1/main.log deleted file mode 100644 index 
7301b9b83a6eb63d40522ea1405d54b2798d6cb6..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:43:51_aea761499f4b1193f2706f471442da6f9df65d65/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-05 13:06:05,131][benchmark][INFO] - Configuring inference benchmark -[2023-09-05 13:06:05,132][benchmark][INFO] - + Setting seed(42) -[2023-09-05 13:06:05,579][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-05 13:06:05,579][backend][INFO] - Configuring pytorch backend -[2023-09-05 13:06:05,579][backend][INFO] - + Checking initial device isolation -[2023-09-05 13:06:05,579][backend][INFO] - + Checking contineous device isolation -[2023-09-05 13:06:05,580][pytorch][INFO] - + Disabling gradients -[2023-09-05 13:06:05,580][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-05 13:06:05,700][pytorch][INFO] - + Turning on eval mode -[2023-09-05 13:06:05,701][inference][INFO] - Running inference benchmark -[2023-09-05 13:06:05,818][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 13:06:05,819][inference][INFO] - + Tracking forward pass peak memory -[2023-09-05 13:06:06,013][inference][INFO] - + Forward pass peak memory: 467.78777599999995 (MB) -[2023-09-05 13:06:06,015][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 13:06:06,017][inference][INFO] - + Warming up the forward pass -[2023-09-05 13:06:06,060][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-05 13:06:11,108][inference][INFO] - + Forward pass latency: 3.85e-03 (s) -[2023-09-05 13:06:11,109][inference][INFO] - + Forward pass throughput: 1040.00 (samples/s) -[2023-09-05 13:06:11,109][inference][INFO] - Saving inference results -[2023-09-05 13:06:11,116][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-05_11:43:51_aea761499f4b1193f2706f471442da6f9df65d65/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-05_11:43:51_aea761499f4b1193f2706f471442da6f9df65d65/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 7e32a35f367f532a16d65c94e459c4f30c9188c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:43:51_aea761499f4b1193f2706f471442da6f9df65d65/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_11:43:51_aea761499f4b1193f2706f471442da6f9df65d65/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-05_11:43:51_aea761499f4b1193f2706f471442da6f9df65d65/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index f725cadb67d7d7152a901773ecd52219bc5c8911..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:43:51_aea761499f4b1193f2706f471442da6f9df65d65/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-05_11:43:51_aea761499f4b1193f2706f471442da6f9df65d65/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-05_11:43:51_aea761499f4b1193f2706f471442da6f9df65d65/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-09-05_11:43:51_aea761499f4b1193f2706f471442da6f9df65d65/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:43:51_aea761499f4b1193f2706f471442da6f9df65d65/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-05_11:43:51_aea761499f4b1193f2706f471442da6f9df65d65/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-05_11:43:51_aea761499f4b1193f2706f471442da6f9df65d65/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 5cf5f1ff54ffd6523449b409066f7697ce8772db..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:43:51_aea761499f4b1193f2706f471442da6f9df65d65/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_11:43:51_aea761499f4b1193f2706f471442da6f9df65d65/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-05_11:43:51_aea761499f4b1193f2706f471442da6f9df65d65/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 56060a0cbba7ce79d3e57b2c487722c54618d393..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:43:51_aea761499f4b1193f2706f471442da6f9df65d65/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.037056,0.00389,257.0,0.508,197.0 diff --git a/raw_results/2023-09-05_11:43:51_aea761499f4b1193f2706f471442da6f9df65d65/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-05_11:43:51_aea761499f4b1193f2706f471442da6f9df65d65/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 8ce0348632dfe4eeed71dc182d9b466f46e13eb1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_11:43:51_aea761499f4b1193f2706f471442da6f9df65d65/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-05 13:06:16,252][benchmark][INFO] - Configuring inference benchmark -[2023-09-05 13:06:16,253][benchmark][INFO] - + Setting seed(42) -[2023-09-05 13:06:17,734][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-05 13:06:17,734][backend][INFO] - Configuring pytorch backend -[2023-09-05 13:06:17,734][backend][INFO] - + Checking initial device isolation -[2023-09-05 13:06:17,734][backend][INFO] - + Checking contineous device isolation -[2023-09-05 13:06:17,734][pytorch][INFO] - + Disabling gradients -[2023-09-05 13:06:17,735][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-05 13:06:18,393][pytorch][INFO] - + Turning on eval mode -[2023-09-05 13:06:18,394][inference][INFO] - Running inference benchmark -[2023-09-05 13:06:18,595][inference][INFO] - + Tracking forward pass peak memory -[2023-09-05 13:06:18,748][inference][INFO] - + Forward pass peak memory: 469.037056 (MB) -[2023-09-05 13:06:18,750][inference][INFO] - + Warming up the forward pass 
-[2023-09-05 13:06:18,785][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-05 13:06:23,832][inference][INFO] - + Forward pass latency: 3.89e-03 (s) -[2023-09-05 13:06:23,834][inference][INFO] - + Forward pass throughput: 257.00 (samples/s) -[2023-09-05 13:06:23,834][inference][INFO] - + Warming up the generation pass -[2023-09-05 13:06:24,372][inference][INFO] - + Tracking generation latency and throughput -[2023-09-05 13:06:29,456][inference][INFO] - + Generation pass latency: 5.08e-01 (s) -[2023-09-05 13:06:29,456][inference][INFO] - + Generation pass throughput: 197.00 (tokens/s) -[2023-09-05 13:06:29,457][inference][INFO] - Saving inference results -[2023-09-05 13:06:29,467][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-05_13:47:00_9a70d6e56f2801c9a3aa80ca97e6a32024db72b7/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-05_13:47:00_9a70d6e56f2801c9a3aa80ca97e6a32024db72b7/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index d75847bed7772db4647c7b58e851f1868b48e619..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_13:47:00_9a70d6e56f2801c9a3aa80ca97e6a32024db72b7/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_13:47:00_9a70d6e56f2801c9a3aa80ca97e6a32024db72b7/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-05_13:47:00_9a70d6e56f2801c9a3aa80ca97e6a32024db72b7/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index b48f4bafb884505247114467fb78a808c986dcbd..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_13:47:00_9a70d6e56f2801c9a3aa80ca97e6a32024db72b7/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-05_13:47:00_9a70d6e56f2801c9a3aa80ca97e6a32024db72b7/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-09-05_13:47:00_9a70d6e56f2801c9a3aa80ca97e6a32024db72b7/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-05_13:47:00_9a70d6e56f2801c9a3aa80ca97e6a32024db72b7/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_13:47:00_9a70d6e56f2801c9a3aa80ca97e6a32024db72b7/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-05_13:47:00_9a70d6e56f2801c9a3aa80ca97e6a32024db72b7/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-05_13:47:00_9a70d6e56f2801c9a3aa80ca97e6a32024db72b7/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 36a53864c4e26f9e38905bad2498ae02dc1b767f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_13:47:00_9a70d6e56f2801c9a3aa80ca97e6a32024db72b7/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_13:47:00_9a70d6e56f2801c9a3aa80ca97e6a32024db72b7/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-05_13:47:00_9a70d6e56f2801c9a3aa80ca97e6a32024db72b7/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index e1fd5c01c5a92852772203b37f9b242ee4787ee3..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_13:47:00_9a70d6e56f2801c9a3aa80ca97e6a32024db72b7/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.444288,0.0034,294.0 diff --git a/raw_results/2023-09-05_13:47:00_9a70d6e56f2801c9a3aa80ca97e6a32024db72b7/pytorch_bert_inference/0/main.log b/raw_results/2023-09-05_13:47:00_9a70d6e56f2801c9a3aa80ca97e6a32024db72b7/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
9a07e577d2859fb989e4acfbae4a155cce51ed59..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_13:47:00_9a70d6e56f2801c9a3aa80ca97e6a32024db72b7/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-05 14:49:48,522][benchmark][INFO] - Configuring inference benchmark -[2023-09-05 14:49:48,523][benchmark][INFO] - + Setting seed(42) -[2023-09-05 14:49:49,798][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-05 14:49:49,799][backend][INFO] - Configuring pytorch backend -[2023-09-05 14:49:49,799][backend][INFO] - + Checking initial device isolation -[2023-09-05 14:49:49,799][backend][INFO] - + Checking contineous device isolation -[2023-09-05 14:49:49,799][pytorch][INFO] - + Disabling gradients -[2023-09-05 14:49:49,799][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-05 14:49:50,404][pytorch][INFO] - + Turning on eval mode -[2023-09-05 14:49:50,404][inference][INFO] - Running inference benchmark -[2023-09-05 14:49:50,527][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 14:49:50,528][inference][INFO] - + Tracking forward pass peak memory -[2023-09-05 14:49:50,589][inference][INFO] - + Forward pass peak memory: 466.444288 (MB) -[2023-09-05 14:49:50,590][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 14:49:50,592][inference][INFO] - + Warming up the forward pass -[2023-09-05 14:49:50,628][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-05 14:49:55,681][inference][INFO] - + Forward pass latency: 3.40e-03 (s) -[2023-09-05 14:49:55,683][inference][INFO] - + Forward pass throughput: 294.00 (samples/s) -[2023-09-05 14:49:55,683][inference][INFO] - Saving inference results -[2023-09-05 14:49:55,697][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-05_13:47:00_9a70d6e56f2801c9a3aa80ca97e6a32024db72b7/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-05_13:47:00_9a70d6e56f2801c9a3aa80ca97e6a32024db72b7/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 344ec2f935a31a19a62e706f12f4bb8e09c4c386..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_13:47:00_9a70d6e56f2801c9a3aa80ca97e6a32024db72b7/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_13:47:00_9a70d6e56f2801c9a3aa80ca97e6a32024db72b7/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-05_13:47:00_9a70d6e56f2801c9a3aa80ca97e6a32024db72b7/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index ac66939ea52652dde30d66c7037d2dddce339712..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_13:47:00_9a70d6e56f2801c9a3aa80ca97e6a32024db72b7/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-05_13:47:00_9a70d6e56f2801c9a3aa80ca97e6a32024db72b7/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-05_13:47:00_9a70d6e56f2801c9a3aa80ca97e6a32024db72b7/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-09-05_13:47:00_9a70d6e56f2801c9a3aa80ca97e6a32024db72b7/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_13:47:00_9a70d6e56f2801c9a3aa80ca97e6a32024db72b7/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-05_13:47:00_9a70d6e56f2801c9a3aa80ca97e6a32024db72b7/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-05_13:47:00_9a70d6e56f2801c9a3aa80ca97e6a32024db72b7/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 9af0e66383a2862a3d036ea4917e3bacabb881ae..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_13:47:00_9a70d6e56f2801c9a3aa80ca97e6a32024db72b7/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_13:47:00_9a70d6e56f2801c9a3aa80ca97e6a32024db72b7/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-05_13:47:00_9a70d6e56f2801c9a3aa80ca97e6a32024db72b7/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 1890d36182b2533869489809e7ec1b6038d09c80..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_13:47:00_9a70d6e56f2801c9a3aa80ca97e6a32024db72b7/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.3536,0.00431,928.0 diff --git a/raw_results/2023-09-05_13:47:00_9a70d6e56f2801c9a3aa80ca97e6a32024db72b7/pytorch_bert_inference/1/main.log b/raw_results/2023-09-05_13:47:00_9a70d6e56f2801c9a3aa80ca97e6a32024db72b7/pytorch_bert_inference/1/main.log deleted file mode 100644 index d91cd5ade49b3f1877d97863ccd48b94aa80e885..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_13:47:00_9a70d6e56f2801c9a3aa80ca97e6a32024db72b7/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-05 14:49:56,143][benchmark][INFO] - Configuring inference benchmark -[2023-09-05 14:49:56,144][benchmark][INFO] - + Setting seed(42) -[2023-09-05 14:49:56,692][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-05 14:49:56,693][backend][INFO] - Configuring pytorch backend -[2023-09-05 14:49:56,693][backend][INFO] - + Checking initial device isolation -[2023-09-05 14:49:56,693][backend][INFO] - + Checking contineous device isolation -[2023-09-05 14:49:56,693][pytorch][INFO] - + Disabling gradients -[2023-09-05 14:49:56,693][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-05 14:49:56,837][pytorch][INFO] - + Turning on eval mode -[2023-09-05 14:49:56,838][inference][INFO] - Running inference benchmark -[2023-09-05 14:49:56,984][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 14:49:56,985][inference][INFO] - + Tracking forward pass peak 
memory -[2023-09-05 14:49:57,031][inference][INFO] - + Forward pass peak memory: 467.3536 (MB) -[2023-09-05 14:49:57,032][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 14:49:57,034][inference][INFO] - + Warming up the forward pass -[2023-09-05 14:49:57,078][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-05 14:50:02,121][inference][INFO] - + Forward pass latency: 4.31e-03 (s) -[2023-09-05 14:50:02,122][inference][INFO] - + Forward pass throughput: 928.00 (samples/s) -[2023-09-05 14:50:02,122][inference][INFO] - Saving inference results -[2023-09-05 14:50:02,130][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-05_13:47:00_9a70d6e56f2801c9a3aa80ca97e6a32024db72b7/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-05_13:47:00_9a70d6e56f2801c9a3aa80ca97e6a32024db72b7/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 7e32a35f367f532a16d65c94e459c4f30c9188c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_13:47:00_9a70d6e56f2801c9a3aa80ca97e6a32024db72b7/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_13:47:00_9a70d6e56f2801c9a3aa80ca97e6a32024db72b7/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-05_13:47:00_9a70d6e56f2801c9a3aa80ca97e6a32024db72b7/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index b72afac6689f5910371b71fa70c506fb6ba7a9da..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_13:47:00_9a70d6e56f2801c9a3aa80ca97e6a32024db72b7/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: 
hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-05_13:47:00_9a70d6e56f2801c9a3aa80ca97e6a32024db72b7/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-05_13:47:00_9a70d6e56f2801c9a3aa80ca97e6a32024db72b7/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-09-05_13:47:00_9a70d6e56f2801c9a3aa80ca97e6a32024db72b7/pytorch_gpt2_inference/0/.config/overrides.yaml 
deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_13:47:00_9a70d6e56f2801c9a3aa80ca97e6a32024db72b7/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-05_13:47:00_9a70d6e56f2801c9a3aa80ca97e6a32024db72b7/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-05_13:47:00_9a70d6e56f2801c9a3aa80ca97e6a32024db72b7/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 5cf5f1ff54ffd6523449b409066f7697ce8772db..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_13:47:00_9a70d6e56f2801c9a3aa80ca97e6a32024db72b7/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_13:47:00_9a70d6e56f2801c9a3aa80ca97e6a32024db72b7/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-05_13:47:00_9a70d6e56f2801c9a3aa80ca97e6a32024db72b7/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 9017946fe4fe0c5eb8906948f987af375df55940..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_13:47:00_9a70d6e56f2801c9a3aa80ca97e6a32024db72b7/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.307392,0.00388,258.0,0.511,196.0 diff --git a/raw_results/2023-09-05_13:47:00_9a70d6e56f2801c9a3aa80ca97e6a32024db72b7/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-05_13:47:00_9a70d6e56f2801c9a3aa80ca97e6a32024db72b7/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 429016a628faf70c6218e4068a26057f5dad8d75..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_13:47:00_9a70d6e56f2801c9a3aa80ca97e6a32024db72b7/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-05 14:50:07,272][benchmark][INFO] - Configuring 
inference benchmark -[2023-09-05 14:50:07,273][benchmark][INFO] - + Setting seed(42) -[2023-09-05 14:50:08,674][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-05 14:50:08,675][backend][INFO] - Configuring pytorch backend -[2023-09-05 14:50:08,675][backend][INFO] - + Checking initial device isolation -[2023-09-05 14:50:08,675][backend][INFO] - + Checking contineous device isolation -[2023-09-05 14:50:08,675][pytorch][INFO] - + Disabling gradients -[2023-09-05 14:50:08,675][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-05 14:50:09,409][pytorch][INFO] - + Turning on eval mode -[2023-09-05 14:50:09,410][inference][INFO] - Running inference benchmark -[2023-09-05 14:50:09,600][inference][INFO] - + Tracking forward pass peak memory -[2023-09-05 14:50:09,647][inference][INFO] - + Forward pass peak memory: 469.307392 (MB) -[2023-09-05 14:50:09,648][inference][INFO] - + Warming up the forward pass -[2023-09-05 14:50:09,686][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-05 14:50:14,730][inference][INFO] - + Forward pass latency: 3.88e-03 (s) -[2023-09-05 14:50:14,732][inference][INFO] - + Forward pass throughput: 258.00 (samples/s) -[2023-09-05 14:50:14,733][inference][INFO] - + Warming up the generation pass -[2023-09-05 14:50:15,335][inference][INFO] - + Tracking generation latency and throughput -[2023-09-05 14:50:20,445][inference][INFO] - + Generation pass latency: 5.11e-01 (s) -[2023-09-05 14:50:20,446][inference][INFO] - + Generation pass throughput: 196.00 (tokens/s) -[2023-09-05 14:50:20,447][inference][INFO] - Saving inference results -[2023-09-05 14:50:20,461][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-05_13:49:33_aa5c94d38deb3960e809b75bc959dc4357d3dd2b/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-05_13:49:33_aa5c94d38deb3960e809b75bc959dc4357d3dd2b/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index d75847bed7772db4647c7b58e851f1868b48e619..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_13:49:33_aa5c94d38deb3960e809b75bc959dc4357d3dd2b/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: 
null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_13:49:33_aa5c94d38deb3960e809b75bc959dc4357d3dd2b/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-05_13:49:33_aa5c94d38deb3960e809b75bc959dc4357d3dd2b/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 810a8d2ecb9d91925b6f24cf8656aa97538cf36e..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_13:49:33_aa5c94d38deb3960e809b75bc959dc4357d3dd2b/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-05_13:49:33_aa5c94d38deb3960e809b75bc959dc4357d3dd2b/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-05_13:49:33_aa5c94d38deb3960e809b75bc959dc4357d3dd2b/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-05_13:49:33_aa5c94d38deb3960e809b75bc959dc4357d3dd2b/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_13:49:33_aa5c94d38deb3960e809b75bc959dc4357d3dd2b/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-05_13:49:33_aa5c94d38deb3960e809b75bc959dc4357d3dd2b/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-05_13:49:33_aa5c94d38deb3960e809b75bc959dc4357d3dd2b/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 36a53864c4e26f9e38905bad2498ae02dc1b767f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_13:49:33_aa5c94d38deb3960e809b75bc959dc4357d3dd2b/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_13:49:33_aa5c94d38deb3960e809b75bc959dc4357d3dd2b/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-05_13:49:33_aa5c94d38deb3960e809b75bc959dc4357d3dd2b/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 1029c9ce455e92a2799e1fc0a0f81d269bc44b16..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_13:49:33_aa5c94d38deb3960e809b75bc959dc4357d3dd2b/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.94399999999996,0.00309,324.0 diff --git a/raw_results/2023-09-05_13:49:33_aa5c94d38deb3960e809b75bc959dc4357d3dd2b/pytorch_bert_inference/0/main.log b/raw_results/2023-09-05_13:49:33_aa5c94d38deb3960e809b75bc959dc4357d3dd2b/pytorch_bert_inference/0/main.log deleted file mode 100644 index 95166338ad8a1e222c3b06d60d0a18829262fe5f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_13:49:33_aa5c94d38deb3960e809b75bc959dc4357d3dd2b/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-05 14:51:31,130][benchmark][INFO] - Configuring inference benchmark -[2023-09-05 14:51:31,131][benchmark][INFO] - + Setting seed(42) -[2023-09-05 14:51:32,406][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-05 14:51:32,406][backend][INFO] - Configuring pytorch backend -[2023-09-05 14:51:32,406][backend][INFO] - + Checking initial device isolation -[2023-09-05 14:51:32,406][backend][INFO] - + Checking contineous device isolation -[2023-09-05 14:51:32,407][pytorch][INFO] - + Disabling gradients -[2023-09-05 14:51:32,407][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-05 14:51:33,024][pytorch][INFO] - + Turning on eval mode -[2023-09-05 14:51:33,025][inference][INFO] - Running inference benchmark -[2023-09-05 14:51:33,140][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 14:51:33,142][inference][INFO] - + Tracking forward 
pass peak memory -[2023-09-05 14:51:33,208][inference][INFO] - + Forward pass peak memory: 466.94399999999996 (MB) -[2023-09-05 14:51:33,209][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 14:51:33,211][inference][INFO] - + Warming up the forward pass -[2023-09-05 14:51:33,243][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-05 14:51:38,294][inference][INFO] - + Forward pass latency: 3.09e-03 (s) -[2023-09-05 14:51:38,296][inference][INFO] - + Forward pass throughput: 324.00 (samples/s) -[2023-09-05 14:51:38,296][inference][INFO] - Saving inference results -[2023-09-05 14:51:38,306][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-05_13:49:33_aa5c94d38deb3960e809b75bc959dc4357d3dd2b/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-05_13:49:33_aa5c94d38deb3960e809b75bc959dc4357d3dd2b/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 344ec2f935a31a19a62e706f12f4bb8e09c4c386..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_13:49:33_aa5c94d38deb3960e809b75bc959dc4357d3dd2b/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_13:49:33_aa5c94d38deb3960e809b75bc959dc4357d3dd2b/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-05_13:49:33_aa5c94d38deb3960e809b75bc959dc4357d3dd2b/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 0259e5caf6b1775cdb80d13dc0a32fd111c8aab8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_13:49:33_aa5c94d38deb3960e809b75bc959dc4357d3dd2b/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} 
- launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-05_13:49:33_aa5c94d38deb3960e809b75bc959dc4357d3dd2b/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-09-05_13:49:33_aa5c94d38deb3960e809b75bc959dc4357d3dd2b/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-09-05_13:49:33_aa5c94d38deb3960e809b75bc959dc4357d3dd2b/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_13:49:33_aa5c94d38deb3960e809b75bc959dc4357d3dd2b/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-05_13:49:33_aa5c94d38deb3960e809b75bc959dc4357d3dd2b/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-05_13:49:33_aa5c94d38deb3960e809b75bc959dc4357d3dd2b/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 9af0e66383a2862a3d036ea4917e3bacabb881ae..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_13:49:33_aa5c94d38deb3960e809b75bc959dc4357d3dd2b/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_13:49:33_aa5c94d38deb3960e809b75bc959dc4357d3dd2b/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-05_13:49:33_aa5c94d38deb3960e809b75bc959dc4357d3dd2b/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 80cd315976d7a14024739732ce5871f8d2f51970..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_13:49:33_aa5c94d38deb3960e809b75bc959dc4357d3dd2b/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.984384,0.00342,1170.0 diff --git a/raw_results/2023-09-05_13:49:33_aa5c94d38deb3960e809b75bc959dc4357d3dd2b/pytorch_bert_inference/1/main.log b/raw_results/2023-09-05_13:49:33_aa5c94d38deb3960e809b75bc959dc4357d3dd2b/pytorch_bert_inference/1/main.log deleted file mode 100644 index 
d1ced60a85d90a0a13bce9d86abc71dc306c1a5e..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_13:49:33_aa5c94d38deb3960e809b75bc959dc4357d3dd2b/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-05 14:51:38,689][benchmark][INFO] - Configuring inference benchmark -[2023-09-05 14:51:38,690][benchmark][INFO] - + Setting seed(42) -[2023-09-05 14:51:39,306][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-05 14:51:39,306][backend][INFO] - Configuring pytorch backend -[2023-09-05 14:51:39,307][backend][INFO] - + Checking initial device isolation -[2023-09-05 14:51:39,307][backend][INFO] - + Checking contineous device isolation -[2023-09-05 14:51:39,307][pytorch][INFO] - + Disabling gradients -[2023-09-05 14:51:39,307][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-05 14:51:39,421][pytorch][INFO] - + Turning on eval mode -[2023-09-05 14:51:39,421][inference][INFO] - Running inference benchmark -[2023-09-05 14:51:39,539][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 14:51:39,540][inference][INFO] - + Tracking forward pass peak memory -[2023-09-05 14:51:39,585][inference][INFO] - + Forward pass peak memory: 467.984384 (MB) -[2023-09-05 14:51:39,586][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 14:51:39,587][inference][INFO] - + Warming up the forward pass -[2023-09-05 14:51:39,627][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-05 14:51:44,680][inference][INFO] - + Forward pass latency: 3.42e-03 (s) -[2023-09-05 14:51:44,681][inference][INFO] - + Forward pass throughput: 1170.00 (samples/s) -[2023-09-05 14:51:44,681][inference][INFO] - Saving inference results -[2023-09-05 14:51:44,690][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-05_13:49:33_aa5c94d38deb3960e809b75bc959dc4357d3dd2b/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-05_13:49:33_aa5c94d38deb3960e809b75bc959dc4357d3dd2b/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 7e32a35f367f532a16d65c94e459c4f30c9188c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_13:49:33_aa5c94d38deb3960e809b75bc959dc4357d3dd2b/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_13:49:33_aa5c94d38deb3960e809b75bc959dc4357d3dd2b/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-05_13:49:33_aa5c94d38deb3960e809b75bc959dc4357d3dd2b/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index aeb0d36b642dcc21005214d04576b52c7df07065..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_13:49:33_aa5c94d38deb3960e809b75bc959dc4357d3dd2b/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
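[Editor's note] The hydra.yaml files in this diff differ mainly in their sweeper settings: the bert_cpu_inference jobs sweep benchmark.input_shapes.batch_size over 1,4 (yielding job subdirs 0 and 1 via ${hydra.job.num}), while the gpt2_cpu_inference job has params: null and runs as a single job 0. A rough illustration of how the basic sweeper's comma-separated override fans out into numbered jobs — this only mimics the observable behavior, it is not Hydra's implementation:

    # one sweep override, as recorded in overrides.yaml
    override = "benchmark.input_shapes.batch_size=1,4"

    key, values = override.split("=")
    for num, value in enumerate(values.split(",")):
        # each job runs with hydra.job.num == num and one concrete value
        print(f"job {num}: {key}={value} -> sweeps/<date>_<sha>/pytorch_bert_inference/{num}")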
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-05_13:49:33_aa5c94d38deb3960e809b75bc959dc4357d3dd2b/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-05_13:49:33_aa5c94d38deb3960e809b75bc959dc4357d3dd2b/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-09-05_13:49:33_aa5c94d38deb3960e809b75bc959dc4357d3dd2b/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_13:49:33_aa5c94d38deb3960e809b75bc959dc4357d3dd2b/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-05_13:49:33_aa5c94d38deb3960e809b75bc959dc4357d3dd2b/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-05_13:49:33_aa5c94d38deb3960e809b75bc959dc4357d3dd2b/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 5cf5f1ff54ffd6523449b409066f7697ce8772db..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_13:49:33_aa5c94d38deb3960e809b75bc959dc4357d3dd2b/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_13:49:33_aa5c94d38deb3960e809b75bc959dc4357d3dd2b/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-05_13:49:33_aa5c94d38deb3960e809b75bc959dc4357d3dd2b/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 75965126aaec39027435c50e7a3dd81adba360fa..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_13:49:33_aa5c94d38deb3960e809b75bc959dc4357d3dd2b/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.815296,0.00379,264.0,0.54,185.0 diff --git a/raw_results/2023-09-05_13:49:33_aa5c94d38deb3960e809b75bc959dc4357d3dd2b/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-05_13:49:33_aa5c94d38deb3960e809b75bc959dc4357d3dd2b/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 061ec89f33daef357a052a543a30cab070508403..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_13:49:33_aa5c94d38deb3960e809b75bc959dc4357d3dd2b/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-05 14:51:49,452][benchmark][INFO] - Configuring inference benchmark -[2023-09-05 14:51:49,452][benchmark][INFO] - + Setting seed(42) -[2023-09-05 14:51:50,823][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-05 14:51:50,824][backend][INFO] - Configuring pytorch backend -[2023-09-05 14:51:50,824][backend][INFO] - + Checking initial device isolation -[2023-09-05 14:51:50,824][backend][INFO] - + Checking contineous device isolation -[2023-09-05 14:51:50,824][pytorch][INFO] - + Disabling gradients -[2023-09-05 14:51:50,825][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-05 14:51:51,459][pytorch][INFO] - + Turning on eval mode -[2023-09-05 14:51:51,460][inference][INFO] - Running inference benchmark -[2023-09-05 14:51:51,653][inference][INFO] - + Tracking forward pass peak memory -[2023-09-05 14:51:51,697][inference][INFO] - + Forward pass peak memory: 469.815296 (MB) -[2023-09-05 14:51:51,699][inference][INFO] - + Warming up the forward pass 
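[Editor's note] The gpt2 results row above (",469.037056-style CSV: 0.00379 s forward, 264 samples/s, 0.54 s generate, 185 tokens/s") is consistent with throughput being derived directly from latency: forward throughput as batch_size / latency and generation throughput as new_tokens / latency, with batch_size: 1 and new_tokens: 100 from the config. A quick sanity check of that arithmetic (the division rule is inferred from the numbers, not taken from the tool's source):

    batch_size, new_tokens = 1, 100                    # from the benchmark config above
    forward_latency, generate_latency = 0.00379, 0.54  # seconds, from the CSV row

    print(round(batch_size / forward_latency))   # 264 samples/s, matching the CSV
    print(round(new_tokens / generate_latency))  # 185 tokens/s, matching the CSV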
-[2023-09-05 14:51:51,730][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-05 14:51:56,776][inference][INFO] - + Forward pass latency: 3.79e-03 (s) -[2023-09-05 14:51:56,778][inference][INFO] - + Forward pass throughput: 264.00 (samples/s) -[2023-09-05 14:51:56,779][inference][INFO] - + Warming up the generation pass -[2023-09-05 14:51:57,370][inference][INFO] - + Tracking generation latency and throughput -[2023-09-05 14:52:02,771][inference][INFO] - + Generation pass latency: 5.40e-01 (s) -[2023-09-05 14:52:02,772][inference][INFO] - + Generation pass throughput: 185.00 (tokens/s) -[2023-09-05 14:52:02,772][inference][INFO] - Saving inference results -[2023-09-05 14:52:02,788][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-05_14:41:42_70a98024b1b0007d2d8bdced854cd9b638dbb07b/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-05_14:41:42_70a98024b1b0007d2d8bdced854cd9b638dbb07b/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index d75847bed7772db4647c7b58e851f1868b48e619..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_14:41:42_70a98024b1b0007d2d8bdced854cd9b638dbb07b/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_14:41:42_70a98024b1b0007d2d8bdced854cd9b638dbb07b/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-05_14:41:42_70a98024b1b0007d2d8bdced854cd9b638dbb07b/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 166c85ddfd107a50b4764d51db6b996775c749bd..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_14:41:42_70a98024b1b0007d2d8bdced854cd9b638dbb07b/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-05_14:41:42_70a98024b1b0007d2d8bdced854cd9b638dbb07b/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-09-05_14:41:42_70a98024b1b0007d2d8bdced854cd9b638dbb07b/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-05_14:41:42_70a98024b1b0007d2d8bdced854cd9b638dbb07b/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_14:41:42_70a98024b1b0007d2d8bdced854cd9b638dbb07b/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-05_14:41:42_70a98024b1b0007d2d8bdced854cd9b638dbb07b/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-05_14:41:42_70a98024b1b0007d2d8bdced854cd9b638dbb07b/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 36a53864c4e26f9e38905bad2498ae02dc1b767f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_14:41:42_70a98024b1b0007d2d8bdced854cd9b638dbb07b/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_14:41:42_70a98024b1b0007d2d8bdced854cd9b638dbb07b/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-05_14:41:42_70a98024b1b0007d2d8bdced854cd9b638dbb07b/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 2a90492743dc0314611ff1dc6d18a484776b9b4a..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_14:41:42_70a98024b1b0007d2d8bdced854cd9b638dbb07b/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.89075199999996,0.00327,306.0 diff --git a/raw_results/2023-09-05_14:41:42_70a98024b1b0007d2d8bdced854cd9b638dbb07b/pytorch_bert_inference/0/main.log b/raw_results/2023-09-05_14:41:42_70a98024b1b0007d2d8bdced854cd9b638dbb07b/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
8386ca2eddb50f20d362dbf7665094eafab2d5c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_14:41:42_70a98024b1b0007d2d8bdced854cd9b638dbb07b/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-05 14:53:12,327][benchmark][INFO] - Configuring inference benchmark -[2023-09-05 14:53:12,328][benchmark][INFO] - + Setting seed(42) -[2023-09-05 14:53:13,794][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-05 14:53:13,795][backend][INFO] - Configuring pytorch backend -[2023-09-05 14:53:13,795][backend][INFO] - + Checking initial device isolation -[2023-09-05 14:53:13,795][backend][INFO] - + Checking contineous device isolation -[2023-09-05 14:53:13,795][pytorch][INFO] - + Disabling gradients -[2023-09-05 14:53:13,795][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-05 14:53:14,419][pytorch][INFO] - + Turning on eval mode -[2023-09-05 14:53:14,420][inference][INFO] - Running inference benchmark -[2023-09-05 14:53:14,537][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 14:53:14,538][inference][INFO] - + Tracking forward pass peak memory -[2023-09-05 14:53:14,599][inference][INFO] - + Forward pass peak memory: 466.89075199999996 (MB) -[2023-09-05 14:53:14,600][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 14:53:14,602][inference][INFO] - + Warming up the forward pass -[2023-09-05 14:53:14,645][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-05 14:53:19,694][inference][INFO] - + Forward pass latency: 3.27e-03 (s) -[2023-09-05 14:53:19,695][inference][INFO] - + Forward pass throughput: 306.00 (samples/s) -[2023-09-05 14:53:19,696][inference][INFO] - Saving inference results -[2023-09-05 14:53:19,709][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-05_14:41:42_70a98024b1b0007d2d8bdced854cd9b638dbb07b/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-05_14:41:42_70a98024b1b0007d2d8bdced854cd9b638dbb07b/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 344ec2f935a31a19a62e706f12f4bb8e09c4c386..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_14:41:42_70a98024b1b0007d2d8bdced854cd9b638dbb07b/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
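[Editor's note] Each run writes a one-row inference_results.csv whose first, unnamed column is the row index. A minimal way to load one and pull out the metrics (pandas assumed available; the path is shortened here for illustration):

    import pandas as pd

    # e.g. raw_results/<date>_<sha>/pytorch_bert_inference/0/inference_results.csv
    df = pd.read_csv("inference_results.csv", index_col=0)
    print(df["forward.latency(s)"].iloc[0])             # 0.00327 for the run above
    print(df["forward.throughput(samples/s)"].iloc[0])  # 306.0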
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_14:41:42_70a98024b1b0007d2d8bdced854cd9b638dbb07b/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-05_14:41:42_70a98024b1b0007d2d8bdced854cd9b638dbb07b/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 85fa433007d2af7bd4ff47728f97f0a644d101a9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_14:41:42_70a98024b1b0007d2d8bdced854cd9b638dbb07b/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
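[Editor's note] The run and sweep directories in hydra.yaml are interpolated from environment variables at launch time via OmegaConf's built-in oc.env resolver, which is how every result path in this diff embeds the commit date and SHA. A small demonstration (assumes the omegaconf package, version 2.1 or later):

    import os
    from omegaconf import OmegaConf

    os.environ["COMMIT_DATE_GMT"] = "2023-09-05_14:41:42"
    os.environ["COMMIT_SHA"] = "70a98024b1b0007d2d8bdced854cd9b638dbb07b"

    cfg = OmegaConf.create({
        "experiment_name": "pytorch_bert_inference",
        "sweep_dir": "sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}",
    })
    # resolution happens on access
    print(cfg.sweep_dir)
    # sweeps/2023-09-05_14:41:42_70a98024b1b0007d2d8bdced854cd9b638dbb07b/pytorch_bert_inference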
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-05_14:41:42_70a98024b1b0007d2d8bdced854cd9b638dbb07b/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-05_14:41:42_70a98024b1b0007d2d8bdced854cd9b638dbb07b/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-09-05_14:41:42_70a98024b1b0007d2d8bdced854cd9b638dbb07b/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_14:41:42_70a98024b1b0007d2d8bdced854cd9b638dbb07b/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-05_14:41:42_70a98024b1b0007d2d8bdced854cd9b638dbb07b/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-05_14:41:42_70a98024b1b0007d2d8bdced854cd9b638dbb07b/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 9af0e66383a2862a3d036ea4917e3bacabb881ae..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_14:41:42_70a98024b1b0007d2d8bdced854cd9b638dbb07b/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
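[Editor's note] The job_logging section above configures two handlers: a colorlog console handler and a plain FileHandler writing ${hydra.job.name}.log (i.e. main.log) with the "simple" format — exactly the "[time][name][level] - message" shape of every log line quoted in this diff. The stdlib part, reduced to a sketch (the colorlog console handler is omitted):

    import logging

    handler = logging.FileHandler("main.log")
    handler.setFormatter(
        logging.Formatter("[%(asctime)s][%(name)s][%(levelname)s] - %(message)s")
    )

    root = logging.getLogger()
    root.setLevel(logging.INFO)
    root.addHandler(handler)

    # produces e.g. "[2023-09-05 14:53:20,807][inference][INFO] - + ..."
    logging.getLogger("inference").info("+ Tracking forward pass peak memory")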
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_14:41:42_70a98024b1b0007d2d8bdced854cd9b638dbb07b/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-05_14:41:42_70a98024b1b0007d2d8bdced854cd9b638dbb07b/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 2f8de830629393c21c37fdfecee7c6b717fb29a5..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_14:41:42_70a98024b1b0007d2d8bdced854cd9b638dbb07b/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.873792,0.00357,1120.0 diff --git a/raw_results/2023-09-05_14:41:42_70a98024b1b0007d2d8bdced854cd9b638dbb07b/pytorch_bert_inference/1/main.log b/raw_results/2023-09-05_14:41:42_70a98024b1b0007d2d8bdced854cd9b638dbb07b/pytorch_bert_inference/1/main.log deleted file mode 100644 index 0df1e786a231f0c62d92185703ab0206d3cb996f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_14:41:42_70a98024b1b0007d2d8bdced854cd9b638dbb07b/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-05 14:53:20,107][benchmark][INFO] - Configuring inference benchmark -[2023-09-05 14:53:20,108][benchmark][INFO] - + Setting seed(42) -[2023-09-05 14:53:20,549][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-05 14:53:20,550][backend][INFO] - Configuring pytorch backend -[2023-09-05 14:53:20,550][backend][INFO] - + Checking initial device isolation -[2023-09-05 14:53:20,550][backend][INFO] - + Checking contineous device isolation -[2023-09-05 14:53:20,550][pytorch][INFO] - + Disabling gradients -[2023-09-05 14:53:20,550][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-05 14:53:20,674][pytorch][INFO] - + Turning on eval mode -[2023-09-05 14:53:20,674][inference][INFO] - Running inference benchmark -[2023-09-05 14:53:20,807][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 14:53:20,808][inference][INFO] - + Tracking forward pass 
peak memory -[2023-09-05 14:53:20,853][inference][INFO] - + Forward pass peak memory: 467.873792 (MB) -[2023-09-05 14:53:20,854][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 14:53:20,855][inference][INFO] - + Warming up the forward pass -[2023-09-05 14:53:20,907][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-05 14:53:25,950][inference][INFO] - + Forward pass latency: 3.57e-03 (s) -[2023-09-05 14:53:25,951][inference][INFO] - + Forward pass throughput: 1120.00 (samples/s) -[2023-09-05 14:53:25,951][inference][INFO] - Saving inference results -[2023-09-05 14:53:25,960][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-05_14:41:42_70a98024b1b0007d2d8bdced854cd9b638dbb07b/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-05_14:41:42_70a98024b1b0007d2d8bdced854cd9b638dbb07b/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 7e32a35f367f532a16d65c94e459c4f30c9188c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_14:41:42_70a98024b1b0007d2d8bdced854cd9b638dbb07b/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_14:41:42_70a98024b1b0007d2d8bdced854cd9b638dbb07b/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-05_14:41:42_70a98024b1b0007d2d8bdced854cd9b638dbb07b/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index f0e88a2705659f8fafa391a38cbcd4e47f42803c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_14:41:42_70a98024b1b0007d2d8bdced854cd9b638dbb07b/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-05_14:41:42_70a98024b1b0007d2d8bdced854cd9b638dbb07b/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-05_14:41:42_70a98024b1b0007d2d8bdced854cd9b638dbb07b/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-09-05_14:41:42_70a98024b1b0007d2d8bdced854cd9b638dbb07b/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_14:41:42_70a98024b1b0007d2d8bdced854cd9b638dbb07b/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-05_14:41:42_70a98024b1b0007d2d8bdced854cd9b638dbb07b/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-05_14:41:42_70a98024b1b0007d2d8bdced854cd9b638dbb07b/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 5cf5f1ff54ffd6523449b409066f7697ce8772db..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_14:41:42_70a98024b1b0007d2d8bdced854cd9b638dbb07b/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_14:41:42_70a98024b1b0007d2d8bdced854cd9b638dbb07b/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-05_14:41:42_70a98024b1b0007d2d8bdced854cd9b638dbb07b/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 3971de6e37ce4113cc0772d868372f22ac06661e..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_14:41:42_70a98024b1b0007d2d8bdced854cd9b638dbb07b/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.037056,0.00387,258.0,0.55,182.0 diff --git a/raw_results/2023-09-05_14:41:42_70a98024b1b0007d2d8bdced854cd9b638dbb07b/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-05_14:41:42_70a98024b1b0007d2d8bdced854cd9b638dbb07b/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 048f09357bdfeb4247c6ada67c037fc3b523f762..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-09-05_14:41:42_70a98024b1b0007d2d8bdced854cd9b638dbb07b/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-05 14:53:30,765][benchmark][INFO] - Configuring inference benchmark -[2023-09-05 14:53:30,766][benchmark][INFO] - + Setting seed(42) -[2023-09-05 14:53:32,145][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-05 14:53:32,146][backend][INFO] - Configuring pytorch backend -[2023-09-05 14:53:32,146][backend][INFO] - + Checking initial device isolation -[2023-09-05 14:53:32,146][backend][INFO] - + Checking contineous device isolation -[2023-09-05 14:53:32,146][pytorch][INFO] - + Disabling gradients -[2023-09-05 14:53:32,147][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-05 14:53:32,790][pytorch][INFO] - + Turning on eval mode -[2023-09-05 14:53:32,791][inference][INFO] - Running inference benchmark -[2023-09-05 14:53:32,988][inference][INFO] - + Tracking forward pass peak memory -[2023-09-05 14:53:33,036][inference][INFO] - + Forward pass peak memory: 469.037056 (MB) -[2023-09-05 14:53:33,038][inference][INFO] - + Warming up the forward pass -[2023-09-05 14:53:33,070][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-05 14:53:38,112][inference][INFO] - + Forward pass latency: 3.87e-03 (s) -[2023-09-05 14:53:38,113][inference][INFO] - + Forward pass throughput: 258.00 (samples/s) -[2023-09-05 14:53:38,114][inference][INFO] - + Warming up the generation pass -[2023-09-05 14:53:38,704][inference][INFO] - + Tracking generation latency and throughput -[2023-09-05 14:53:44,207][inference][INFO] - + Generation pass latency: 5.50e-01 (s) -[2023-09-05 14:53:44,208][inference][INFO] - + Generation pass throughput: 182.00 (tokens/s) -[2023-09-05 14:53:44,208][inference][INFO] - Saving inference results -[2023-09-05 14:53:44,222][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-05_15:34:34_da1af21dbbc48ad4f6f0b27635cd3993ddc22b55/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-05_15:34:34_da1af21dbbc48ad4f6f0b27635cd3993ddc22b55/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index d75847bed7772db4647c7b58e851f1868b48e619..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_15:34:34_da1af21dbbc48ad4f6f0b27635cd3993ddc22b55/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_15:34:34_da1af21dbbc48ad4f6f0b27635cd3993ddc22b55/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-05_15:34:34_da1af21dbbc48ad4f6f0b27635cd3993ddc22b55/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 36b4229e97422823d228d58337819f44302e1379..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_15:34:34_da1af21dbbc48ad4f6f0b27635cd3993ddc22b55/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-05_15:34:34_da1af21dbbc48ad4f6f0b27635cd3993ddc22b55/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-05_15:34:34_da1af21dbbc48ad4f6f0b27635cd3993ddc22b55/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-05_15:34:34_da1af21dbbc48ad4f6f0b27635cd3993ddc22b55/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_15:34:34_da1af21dbbc48ad4f6f0b27635cd3993ddc22b55/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-05_15:34:34_da1af21dbbc48ad4f6f0b27635cd3993ddc22b55/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-05_15:34:34_da1af21dbbc48ad4f6f0b27635cd3993ddc22b55/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 36a53864c4e26f9e38905bad2498ae02dc1b767f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_15:34:34_da1af21dbbc48ad4f6f0b27635cd3993ddc22b55/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_15:34:34_da1af21dbbc48ad4f6f0b27635cd3993ddc22b55/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-05_15:34:34_da1af21dbbc48ad4f6f0b27635cd3993ddc22b55/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 29219a4e2c14f6c619da0f59b68dedc62bdbf21a..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_15:34:34_da1af21dbbc48ad4f6f0b27635cd3993ddc22b55/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.808832,0.00323,310.0 diff --git a/raw_results/2023-09-05_15:34:34_da1af21dbbc48ad4f6f0b27635cd3993ddc22b55/pytorch_bert_inference/0/main.log b/raw_results/2023-09-05_15:34:34_da1af21dbbc48ad4f6f0b27635cd3993ddc22b55/pytorch_bert_inference/0/main.log deleted file mode 100644 index 0021f3063a9dabde6804775596c369f758d27737..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_15:34:34_da1af21dbbc48ad4f6f0b27635cd3993ddc22b55/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-05 16:50:09,123][benchmark][INFO] - Configuring inference benchmark -[2023-09-05 16:50:09,124][benchmark][INFO] - + Setting seed(42) -[2023-09-05 16:50:10,456][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-05 16:50:10,456][backend][INFO] - Configuring pytorch backend -[2023-09-05 16:50:10,457][backend][INFO] - + Checking initial device isolation -[2023-09-05 16:50:10,457][backend][INFO] - + Checking contineous device isolation -[2023-09-05 16:50:10,457][pytorch][INFO] - + Disabling gradients -[2023-09-05 16:50:10,457][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-05 16:50:11,080][pytorch][INFO] - + Turning on eval mode -[2023-09-05 16:50:11,081][inference][INFO] - Running inference benchmark -[2023-09-05 16:50:11,200][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 16:50:11,201][inference][INFO] - + Tracking forward pass peak 
memory -[2023-09-05 16:50:11,261][inference][INFO] - + Forward pass peak memory: 466.808832 (MB) -[2023-09-05 16:50:11,262][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 16:50:11,264][inference][INFO] - + Warming up the forward pass -[2023-09-05 16:50:11,301][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-05 16:50:16,354][inference][INFO] - + Forward pass latency: 3.23e-03 (s) -[2023-09-05 16:50:16,356][inference][INFO] - + Forward pass throughput: 310.00 (samples/s) -[2023-09-05 16:50:16,356][inference][INFO] - Saving inference results -[2023-09-05 16:50:16,367][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-05_15:34:34_da1af21dbbc48ad4f6f0b27635cd3993ddc22b55/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-05_15:34:34_da1af21dbbc48ad4f6f0b27635cd3993ddc22b55/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 344ec2f935a31a19a62e706f12f4bb8e09c4c386..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_15:34:34_da1af21dbbc48ad4f6f0b27635cd3993ddc22b55/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_15:34:34_da1af21dbbc48ad4f6f0b27635cd3993ddc22b55/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-05_15:34:34_da1af21dbbc48ad4f6f0b27635cd3993ddc22b55/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index b36e664efde362e3316ff1b9446534db3e3c51e1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_15:34:34_da1af21dbbc48ad4f6f0b27635cd3993ddc22b55/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-05_15:34:34_da1af21dbbc48ad4f6f0b27635cd3993ddc22b55/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-05_15:34:34_da1af21dbbc48ad4f6f0b27635cd3993ddc22b55/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-09-05_15:34:34_da1af21dbbc48ad4f6f0b27635cd3993ddc22b55/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_15:34:34_da1af21dbbc48ad4f6f0b27635cd3993ddc22b55/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-05_15:34:34_da1af21dbbc48ad4f6f0b27635cd3993ddc22b55/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-05_15:34:34_da1af21dbbc48ad4f6f0b27635cd3993ddc22b55/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 9af0e66383a2862a3d036ea4917e3bacabb881ae..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_15:34:34_da1af21dbbc48ad4f6f0b27635cd3993ddc22b55/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_15:34:34_da1af21dbbc48ad4f6f0b27635cd3993ddc22b55/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-05_15:34:34_da1af21dbbc48ad4f6f0b27635cd3993ddc22b55/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 3ce3d60b06fc2666a6587edbfab7083cb6ab35a4..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_15:34:34_da1af21dbbc48ad4f6f0b27635cd3993ddc22b55/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.82464,0.00364,1100.0 diff --git a/raw_results/2023-09-05_15:34:34_da1af21dbbc48ad4f6f0b27635cd3993ddc22b55/pytorch_bert_inference/1/main.log b/raw_results/2023-09-05_15:34:34_da1af21dbbc48ad4f6f0b27635cd3993ddc22b55/pytorch_bert_inference/1/main.log deleted file mode 100644 index 91a8dfede45a2d1a23c2bcdc22949a47093f03f8..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-09-05_15:34:34_da1af21dbbc48ad4f6f0b27635cd3993ddc22b55/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-05 16:50:16,888][benchmark][INFO] - Configuring inference benchmark -[2023-09-05 16:50:16,889][benchmark][INFO] - + Setting seed(42) -[2023-09-05 16:50:17,327][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-05 16:50:17,327][backend][INFO] - Configuring pytorch backend -[2023-09-05 16:50:17,327][backend][INFO] - + Checking initial device isolation -[2023-09-05 16:50:17,327][backend][INFO] - + Checking contineous device isolation -[2023-09-05 16:50:17,328][pytorch][INFO] - + Disabling gradients -[2023-09-05 16:50:17,328][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-05 16:50:17,445][pytorch][INFO] - + Turning on eval mode -[2023-09-05 16:50:17,445][inference][INFO] - Running inference benchmark -[2023-09-05 16:50:17,573][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 16:50:17,574][inference][INFO] - + Tracking forward pass peak memory -[2023-09-05 16:50:17,617][inference][INFO] - + Forward pass peak memory: 467.82464 (MB) -[2023-09-05 16:50:17,619][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 16:50:17,620][inference][INFO] - + Warming up the forward pass -[2023-09-05 16:50:17,658][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-05 16:50:22,703][inference][INFO] - + Forward pass latency: 3.64e-03 (s) -[2023-09-05 16:50:22,704][inference][INFO] - + Forward pass throughput: 1100.00 (samples/s) -[2023-09-05 16:50:22,704][inference][INFO] - Saving inference results -[2023-09-05 16:50:22,712][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-05_15:34:34_da1af21dbbc48ad4f6f0b27635cd3993ddc22b55/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-05_15:34:34_da1af21dbbc48ad4f6f0b27635cd3993ddc22b55/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 7e32a35f367f532a16d65c94e459c4f30c9188c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_15:34:34_da1af21dbbc48ad4f6f0b27635cd3993ddc22b55/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: 
hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_15:34:34_da1af21dbbc48ad4f6f0b27635cd3993ddc22b55/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-05_15:34:34_da1af21dbbc48ad4f6f0b27635cd3993ddc22b55/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 1555a46b67d8163c347f593f426afabf871590f8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_15:34:34_da1af21dbbc48ad4f6f0b27635cd3993ddc22b55/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-05_15:34:34_da1af21dbbc48ad4f6f0b27635cd3993ddc22b55/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-05_15:34:34_da1af21dbbc48ad4f6f0b27635cd3993ddc22b55/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-09-05_15:34:34_da1af21dbbc48ad4f6f0b27635cd3993ddc22b55/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_15:34:34_da1af21dbbc48ad4f6f0b27635cd3993ddc22b55/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-05_15:34:34_da1af21dbbc48ad4f6f0b27635cd3993ddc22b55/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-05_15:34:34_da1af21dbbc48ad4f6f0b27635cd3993ddc22b55/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 5cf5f1ff54ffd6523449b409066f7697ce8772db..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_15:34:34_da1af21dbbc48ad4f6f0b27635cd3993ddc22b55/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_15:34:34_da1af21dbbc48ad4f6f0b27635cd3993ddc22b55/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-05_15:34:34_da1af21dbbc48ad4f6f0b27635cd3993ddc22b55/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index d9645807e04a06012be069fc0b76ce6bb34b543d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_15:34:34_da1af21dbbc48ad4f6f0b27635cd3993ddc22b55/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.803008,0.004,250.0,0.525,190.0 diff --git a/raw_results/2023-09-05_15:34:34_da1af21dbbc48ad4f6f0b27635cd3993ddc22b55/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-05_15:34:34_da1af21dbbc48ad4f6f0b27635cd3993ddc22b55/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index e14c3adc8e0677cabc6a8f380edf451c7e85131e..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_15:34:34_da1af21dbbc48ad4f6f0b27635cd3993ddc22b55/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-05 16:50:27,596][benchmark][INFO] - Configuring inference benchmark -[2023-09-05 16:50:27,597][benchmark][INFO] - + Setting seed(42) -[2023-09-05 16:50:29,076][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-05 16:50:29,076][backend][INFO] - Configuring pytorch backend -[2023-09-05 16:50:29,077][backend][INFO] - + Checking initial device isolation -[2023-09-05 16:50:29,077][backend][INFO] - + Checking contineous device isolation -[2023-09-05 16:50:29,077][pytorch][INFO] - + Disabling gradients -[2023-09-05 16:50:29,077][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-05 16:50:29,874][pytorch][INFO] - + Turning on eval mode -[2023-09-05 16:50:29,875][inference][INFO] - Running inference benchmark -[2023-09-05 16:50:30,080][inference][INFO] - + Tracking forward pass peak memory -[2023-09-05 16:50:30,131][inference][INFO] - + Forward pass peak memory: 469.803008 (MB) -[2023-09-05 16:50:30,132][inference][INFO] - + Warming up the forward pass 
-[2023-09-05 16:50:30,170][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-05 16:50:35,213][inference][INFO] - + Forward pass latency: 4.00e-03 (s) -[2023-09-05 16:50:35,214][inference][INFO] - + Forward pass throughput: 250.00 (samples/s) -[2023-09-05 16:50:35,214][inference][INFO] - + Warming up the generation pass -[2023-09-05 16:50:35,808][inference][INFO] - + Tracking generation latency and throughput -[2023-09-05 16:50:41,065][inference][INFO] - + Generation pass latency: 5.25e-01 (s) -[2023-09-05 16:50:41,067][inference][INFO] - + Generation pass throughput: 190.00 (tokens/s) -[2023-09-05 16:50:41,067][inference][INFO] - Saving inference results -[2023-09-05 16:50:41,082][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-05_16:13:06_1110b565d62e56105c8e5e4e2848bfbf469f8200/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-05_16:13:06_1110b565d62e56105c8e5e4e2848bfbf469f8200/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index d75847bed7772db4647c7b58e851f1868b48e619..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_16:13:06_1110b565d62e56105c8e5e4e2848bfbf469f8200/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_16:13:06_1110b565d62e56105c8e5e4e2848bfbf469f8200/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-05_16:13:06_1110b565d62e56105c8e5e4e2848bfbf469f8200/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index cabbe928d9d6563230364d9c54ec7d63b8b7217f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_16:13:06_1110b565d62e56105c8e5e4e2848bfbf469f8200/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-05_16:13:06_1110b565d62e56105c8e5e4e2848bfbf469f8200/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-09-05_16:13:06_1110b565d62e56105c8e5e4e2848bfbf469f8200/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-05_16:13:06_1110b565d62e56105c8e5e4e2848bfbf469f8200/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_16:13:06_1110b565d62e56105c8e5e4e2848bfbf469f8200/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-05_16:13:06_1110b565d62e56105c8e5e4e2848bfbf469f8200/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-05_16:13:06_1110b565d62e56105c8e5e4e2848bfbf469f8200/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 36a53864c4e26f9e38905bad2498ae02dc1b767f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_16:13:06_1110b565d62e56105c8e5e4e2848bfbf469f8200/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_16:13:06_1110b565d62e56105c8e5e4e2848bfbf469f8200/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-05_16:13:06_1110b565d62e56105c8e5e4e2848bfbf469f8200/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index dfd8fba083366976a5d5d7d213f2d853f340500d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_16:13:06_1110b565d62e56105c8e5e4e2848bfbf469f8200/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.57536,0.00308,325.0 diff --git a/raw_results/2023-09-05_16:13:06_1110b565d62e56105c8e5e4e2848bfbf469f8200/pytorch_bert_inference/0/main.log b/raw_results/2023-09-05_16:13:06_1110b565d62e56105c8e5e4e2848bfbf469f8200/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
d1faaf3c1f3646b5706d20aa62e8e07370e7c336..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_16:13:06_1110b565d62e56105c8e5e4e2848bfbf469f8200/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-05 16:51:52,782][benchmark][INFO] - Configuring inference benchmark -[2023-09-05 16:51:52,783][benchmark][INFO] - + Setting seed(42) -[2023-09-05 16:51:54,032][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-05 16:51:54,032][backend][INFO] - Configuring pytorch backend -[2023-09-05 16:51:54,032][backend][INFO] - + Checking initial device isolation -[2023-09-05 16:51:54,033][backend][INFO] - + Checking contineous device isolation -[2023-09-05 16:51:54,033][pytorch][INFO] - + Disabling gradients -[2023-09-05 16:51:54,033][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-05 16:51:54,669][pytorch][INFO] - + Turning on eval mode -[2023-09-05 16:51:54,670][inference][INFO] - Running inference benchmark -[2023-09-05 16:51:54,792][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 16:51:54,794][inference][INFO] - + Tracking forward pass peak memory -[2023-09-05 16:51:54,857][inference][INFO] - + Forward pass peak memory: 466.57536 (MB) -[2023-09-05 16:51:54,859][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 16:51:54,860][inference][INFO] - + Warming up the forward pass -[2023-09-05 16:51:54,896][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-05 16:51:59,949][inference][INFO] - + Forward pass latency: 3.08e-03 (s) -[2023-09-05 16:51:59,951][inference][INFO] - + Forward pass throughput: 325.00 (samples/s) -[2023-09-05 16:51:59,951][inference][INFO] - Saving inference results -[2023-09-05 16:51:59,963][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-05_16:13:06_1110b565d62e56105c8e5e4e2848bfbf469f8200/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-05_16:13:06_1110b565d62e56105c8e5e4e2848bfbf469f8200/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 344ec2f935a31a19a62e706f12f4bb8e09c4c386..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_16:13:06_1110b565d62e56105c8e5e4e2848bfbf469f8200/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_16:13:06_1110b565d62e56105c8e5e4e2848bfbf469f8200/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-05_16:13:06_1110b565d62e56105c8e5e4e2848bfbf469f8200/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index f56d212e3c47f94f0c25f98839f1cdb2570d7c6f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_16:13:06_1110b565d62e56105c8e5e4e2848bfbf469f8200/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-05_16:13:06_1110b565d62e56105c8e5e4e2848bfbf469f8200/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-05_16:13:06_1110b565d62e56105c8e5e4e2848bfbf469f8200/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-09-05_16:13:06_1110b565d62e56105c8e5e4e2848bfbf469f8200/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_16:13:06_1110b565d62e56105c8e5e4e2848bfbf469f8200/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-05_16:13:06_1110b565d62e56105c8e5e4e2848bfbf469f8200/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-05_16:13:06_1110b565d62e56105c8e5e4e2848bfbf469f8200/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 9af0e66383a2862a3d036ea4917e3bacabb881ae..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_16:13:06_1110b565d62e56105c8e5e4e2848bfbf469f8200/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_16:13:06_1110b565d62e56105c8e5e4e2848bfbf469f8200/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-05_16:13:06_1110b565d62e56105c8e5e4e2848bfbf469f8200/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 7a1620bb2ac0c9d9f02e0af56130f7d5ffc3fe0c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_16:13:06_1110b565d62e56105c8e5e4e2848bfbf469f8200/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.656704,0.00359,1110.0 diff --git a/raw_results/2023-09-05_16:13:06_1110b565d62e56105c8e5e4e2848bfbf469f8200/pytorch_bert_inference/1/main.log b/raw_results/2023-09-05_16:13:06_1110b565d62e56105c8e5e4e2848bfbf469f8200/pytorch_bert_inference/1/main.log deleted file mode 100644 index f70b3bc4d77f323394c1e62f5d79f66599e2091c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_16:13:06_1110b565d62e56105c8e5e4e2848bfbf469f8200/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-05 16:52:00,336][benchmark][INFO] - Configuring inference benchmark -[2023-09-05 16:52:00,337][benchmark][INFO] - + Setting seed(42) -[2023-09-05 16:52:00,781][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-05 16:52:00,781][backend][INFO] - Configuring pytorch backend -[2023-09-05 16:52:00,781][backend][INFO] - + Checking initial device isolation -[2023-09-05 16:52:00,781][backend][INFO] - + Checking contineous device isolation -[2023-09-05 16:52:00,781][pytorch][INFO] - + Disabling gradients -[2023-09-05 16:52:00,782][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-05 16:52:00,901][pytorch][INFO] - + Turning on eval mode -[2023-09-05 16:52:00,902][inference][INFO] - Running inference benchmark -[2023-09-05 16:52:01,026][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 16:52:01,027][inference][INFO] - + Tracking forward pass 
peak memory -[2023-09-05 16:52:01,072][inference][INFO] - + Forward pass peak memory: 467.656704 (MB) -[2023-09-05 16:52:01,073][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 16:52:01,075][inference][INFO] - + Warming up the forward pass -[2023-09-05 16:52:01,124][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-05 16:52:06,172][inference][INFO] - + Forward pass latency: 3.59e-03 (s) -[2023-09-05 16:52:06,173][inference][INFO] - + Forward pass throughput: 1110.00 (samples/s) -[2023-09-05 16:52:06,173][inference][INFO] - Saving inference results -[2023-09-05 16:52:06,182][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-05_16:13:06_1110b565d62e56105c8e5e4e2848bfbf469f8200/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-05_16:13:06_1110b565d62e56105c8e5e4e2848bfbf469f8200/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 7e32a35f367f532a16d65c94e459c4f30c9188c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_16:13:06_1110b565d62e56105c8e5e4e2848bfbf469f8200/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_16:13:06_1110b565d62e56105c8e5e4e2848bfbf469f8200/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-05_16:13:06_1110b565d62e56105c8e5e4e2848bfbf469f8200/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 6332136f25305c8e2e0c5f23e306cb24b30cc720..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_16:13:06_1110b565d62e56105c8e5e4e2848bfbf469f8200/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-05_16:13:06_1110b565d62e56105c8e5e4e2848bfbf469f8200/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-05_16:13:06_1110b565d62e56105c8e5e4e2848bfbf469f8200/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-09-05_16:13:06_1110b565d62e56105c8e5e4e2848bfbf469f8200/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_16:13:06_1110b565d62e56105c8e5e4e2848bfbf469f8200/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-05_16:13:06_1110b565d62e56105c8e5e4e2848bfbf469f8200/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-05_16:13:06_1110b565d62e56105c8e5e4e2848bfbf469f8200/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 5cf5f1ff54ffd6523449b409066f7697ce8772db..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_16:13:06_1110b565d62e56105c8e5e4e2848bfbf469f8200/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_16:13:06_1110b565d62e56105c8e5e4e2848bfbf469f8200/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-05_16:13:06_1110b565d62e56105c8e5e4e2848bfbf469f8200/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 4ba8f12fa2aaf2078a2aa6f8beabe93d07bed4e4..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_16:13:06_1110b565d62e56105c8e5e4e2848bfbf469f8200/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.372928,0.00341,293.0,0.486,206.0 diff --git a/raw_results/2023-09-05_16:13:06_1110b565d62e56105c8e5e4e2848bfbf469f8200/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-05_16:13:06_1110b565d62e56105c8e5e4e2848bfbf469f8200/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index cce1f9eabc03b47597e579d3d6e46317bed680d5..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-09-05_16:13:06_1110b565d62e56105c8e5e4e2848bfbf469f8200/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-05 16:52:10,999][benchmark][INFO] - Configuring inference benchmark -[2023-09-05 16:52:10,999][benchmark][INFO] - + Setting seed(42) -[2023-09-05 16:52:12,424][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-05 16:52:12,425][backend][INFO] - Configuring pytorch backend -[2023-09-05 16:52:12,425][backend][INFO] - + Checking initial device isolation -[2023-09-05 16:52:12,425][backend][INFO] - + Checking contineous device isolation -[2023-09-05 16:52:12,425][pytorch][INFO] - + Disabling gradients -[2023-09-05 16:52:12,425][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-05 16:52:13,256][pytorch][INFO] - + Turning on eval mode -[2023-09-05 16:52:13,256][inference][INFO] - Running inference benchmark -[2023-09-05 16:52:13,455][inference][INFO] - + Tracking forward pass peak memory -[2023-09-05 16:52:13,506][inference][INFO] - + Forward pass peak memory: 469.372928 (MB) -[2023-09-05 16:52:13,507][inference][INFO] - + Warming up the forward pass -[2023-09-05 16:52:13,545][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-05 16:52:18,593][inference][INFO] - + Forward pass latency: 3.41e-03 (s) -[2023-09-05 16:52:18,595][inference][INFO] - + Forward pass throughput: 293.00 (samples/s) -[2023-09-05 16:52:18,596][inference][INFO] - + Warming up the generation pass -[2023-09-05 16:52:19,092][inference][INFO] - + Tracking generation latency and throughput -[2023-09-05 16:52:24,436][inference][INFO] - + Generation pass latency: 4.86e-01 (s) -[2023-09-05 16:52:24,437][inference][INFO] - + Generation pass throughput: 206.00 (tokens/s) -[2023-09-05 16:52:24,437][inference][INFO] - Saving inference results -[2023-09-05 16:52:24,451][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-05_17:01:20_6bc517ccd4a3bcda4d0621d54a37c3e047df223a/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-05_17:01:20_6bc517ccd4a3bcda4d0621d54a37c3e047df223a/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index d75847bed7772db4647c7b58e851f1868b48e619..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_17:01:20_6bc517ccd4a3bcda4d0621d54a37c3e047df223a/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_17:01:20_6bc517ccd4a3bcda4d0621d54a37c3e047df223a/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-05_17:01:20_6bc517ccd4a3bcda4d0621d54a37c3e047df223a/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 7e5a5254902b8c4f95f052e704f3debb72affe12..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_17:01:20_6bc517ccd4a3bcda4d0621d54a37c3e047df223a/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-05_17:01:20_6bc517ccd4a3bcda4d0621d54a37c3e047df223a/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-05_17:01:20_6bc517ccd4a3bcda4d0621d54a37c3e047df223a/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-05_17:01:20_6bc517ccd4a3bcda4d0621d54a37c3e047df223a/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_17:01:20_6bc517ccd4a3bcda4d0621d54a37c3e047df223a/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-05_17:01:20_6bc517ccd4a3bcda4d0621d54a37c3e047df223a/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-05_17:01:20_6bc517ccd4a3bcda4d0621d54a37c3e047df223a/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 36a53864c4e26f9e38905bad2498ae02dc1b767f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_17:01:20_6bc517ccd4a3bcda4d0621d54a37c3e047df223a/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_17:01:20_6bc517ccd4a3bcda4d0621d54a37c3e047df223a/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-05_17:01:20_6bc517ccd4a3bcda4d0621d54a37c3e047df223a/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 207cc7a48d238617ae94f82fe0c2ca6a7b93d800..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_17:01:20_6bc517ccd4a3bcda4d0621d54a37c3e047df223a/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.874368,0.00363,275.0 diff --git a/raw_results/2023-09-05_17:01:20_6bc517ccd4a3bcda4d0621d54a37c3e047df223a/pytorch_bert_inference/0/main.log b/raw_results/2023-09-05_17:01:20_6bc517ccd4a3bcda4d0621d54a37c3e047df223a/pytorch_bert_inference/0/main.log deleted file mode 100644 index 0f027bd07f5ac46d378c0162d88f49bee5810b53..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_17:01:20_6bc517ccd4a3bcda4d0621d54a37c3e047df223a/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-05 18:49:58,894][benchmark][INFO] - Configuring inference benchmark -[2023-09-05 18:49:58,895][benchmark][INFO] - + Setting seed(42) -[2023-09-05 18:50:00,273][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-05 18:50:00,274][backend][INFO] - Configuring pytorch backend -[2023-09-05 18:50:00,274][backend][INFO] - + Checking initial device isolation -[2023-09-05 18:50:00,274][backend][INFO] - + Checking contineous device isolation -[2023-09-05 18:50:00,274][pytorch][INFO] - + Disabling gradients -[2023-09-05 18:50:00,274][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-05 18:50:00,880][pytorch][INFO] - + Turning on eval mode -[2023-09-05 18:50:00,880][inference][INFO] - Running inference benchmark -[2023-09-05 18:50:01,101][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 18:50:01,103][inference][INFO] - + Tracking forward pass peak 
memory -[2023-09-05 18:50:01,168][inference][INFO] - + Forward pass peak memory: 466.874368 (MB) -[2023-09-05 18:50:01,169][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 18:50:01,171][inference][INFO] - + Warming up the forward pass -[2023-09-05 18:50:01,203][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-05 18:50:06,248][inference][INFO] - + Forward pass latency: 3.63e-03 (s) -[2023-09-05 18:50:06,250][inference][INFO] - + Forward pass throughput: 275.00 (samples/s) -[2023-09-05 18:50:06,250][inference][INFO] - Saving inference results -[2023-09-05 18:50:06,261][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-05_17:01:20_6bc517ccd4a3bcda4d0621d54a37c3e047df223a/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-05_17:01:20_6bc517ccd4a3bcda4d0621d54a37c3e047df223a/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 344ec2f935a31a19a62e706f12f4bb8e09c4c386..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_17:01:20_6bc517ccd4a3bcda4d0621d54a37c3e047df223a/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_17:01:20_6bc517ccd4a3bcda4d0621d54a37c3e047df223a/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-05_17:01:20_6bc517ccd4a3bcda4d0621d54a37c3e047df223a/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 2bcf144e6666ecfd6878e7c21c9c6b39a983480d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_17:01:20_6bc517ccd4a3bcda4d0621d54a37c3e047df223a/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-05_17:01:20_6bc517ccd4a3bcda4d0621d54a37c3e047df223a/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-05_17:01:20_6bc517ccd4a3bcda4d0621d54a37c3e047df223a/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-09-05_17:01:20_6bc517ccd4a3bcda4d0621d54a37c3e047df223a/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_17:01:20_6bc517ccd4a3bcda4d0621d54a37c3e047df223a/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-05_17:01:20_6bc517ccd4a3bcda4d0621d54a37c3e047df223a/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-05_17:01:20_6bc517ccd4a3bcda4d0621d54a37c3e047df223a/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 9af0e66383a2862a3d036ea4917e3bacabb881ae..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_17:01:20_6bc517ccd4a3bcda4d0621d54a37c3e047df223a/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_17:01:20_6bc517ccd4a3bcda4d0621d54a37c3e047df223a/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-05_17:01:20_6bc517ccd4a3bcda4d0621d54a37c3e047df223a/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index ee969fff625545349dd352159ee2dba03a7ccde1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_17:01:20_6bc517ccd4a3bcda4d0621d54a37c3e047df223a/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.85331199999996,0.00414,966.0 diff --git a/raw_results/2023-09-05_17:01:20_6bc517ccd4a3bcda4d0621d54a37c3e047df223a/pytorch_bert_inference/1/main.log b/raw_results/2023-09-05_17:01:20_6bc517ccd4a3bcda4d0621d54a37c3e047df223a/pytorch_bert_inference/1/main.log deleted file mode 100644 index 4d44f8a1f6d2ecc6379500ea7eacdc8905d1cce3..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-09-05_17:01:20_6bc517ccd4a3bcda4d0621d54a37c3e047df223a/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-05 18:50:06,638][benchmark][INFO] - Configuring inference benchmark -[2023-09-05 18:50:06,640][benchmark][INFO] - + Setting seed(42) -[2023-09-05 18:50:07,205][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-05 18:50:07,205][backend][INFO] - Configuring pytorch backend -[2023-09-05 18:50:07,205][backend][INFO] - + Checking initial device isolation -[2023-09-05 18:50:07,205][backend][INFO] - + Checking contineous device isolation -[2023-09-05 18:50:07,205][pytorch][INFO] - + Disabling gradients -[2023-09-05 18:50:07,206][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-05 18:50:07,394][pytorch][INFO] - + Turning on eval mode -[2023-09-05 18:50:07,394][inference][INFO] - Running inference benchmark -[2023-09-05 18:50:07,527][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 18:50:07,528][inference][INFO] - + Tracking forward pass peak memory -[2023-09-05 18:50:07,574][inference][INFO] - + Forward pass peak memory: 467.85331199999996 (MB) -[2023-09-05 18:50:07,575][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 18:50:07,577][inference][INFO] - + Warming up the forward pass -[2023-09-05 18:50:07,619][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-05 18:50:12,659][inference][INFO] - + Forward pass latency: 4.14e-03 (s) -[2023-09-05 18:50:12,660][inference][INFO] - + Forward pass throughput: 966.00 (samples/s) -[2023-09-05 18:50:12,660][inference][INFO] - Saving inference results -[2023-09-05 18:50:12,669][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-05_17:01:20_6bc517ccd4a3bcda4d0621d54a37c3e047df223a/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-05_17:01:20_6bc517ccd4a3bcda4d0621d54a37c3e047df223a/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 7e32a35f367f532a16d65c94e459c4f30c9188c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_17:01:20_6bc517ccd4a3bcda4d0621d54a37c3e047df223a/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference 
-model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_17:01:20_6bc517ccd4a3bcda4d0621d54a37c3e047df223a/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-05_17:01:20_6bc517ccd4a3bcda4d0621d54a37c3e047df223a/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 9d4cd5cb6945fe924315301939bfcc068ad3c684..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_17:01:20_6bc517ccd4a3bcda4d0621d54a37c3e047df223a/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-05_17:01:20_6bc517ccd4a3bcda4d0621d54a37c3e047df223a/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-05_17:01:20_6bc517ccd4a3bcda4d0621d54a37c3e047df223a/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-09-05_17:01:20_6bc517ccd4a3bcda4d0621d54a37c3e047df223a/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_17:01:20_6bc517ccd4a3bcda4d0621d54a37c3e047df223a/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-05_17:01:20_6bc517ccd4a3bcda4d0621d54a37c3e047df223a/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-05_17:01:20_6bc517ccd4a3bcda4d0621d54a37c3e047df223a/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 5cf5f1ff54ffd6523449b409066f7697ce8772db..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_17:01:20_6bc517ccd4a3bcda4d0621d54a37c3e047df223a/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_17:01:20_6bc517ccd4a3bcda4d0621d54a37c3e047df223a/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-05_17:01:20_6bc517ccd4a3bcda4d0621d54a37c3e047df223a/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 8c793c9a0a7f04bc925e8104a10638a655b9b592..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_17:01:20_6bc517ccd4a3bcda4d0621d54a37c3e047df223a/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.16403199999996,0.00383,261.0,0.512,195.0 diff --git a/raw_results/2023-09-05_17:01:20_6bc517ccd4a3bcda4d0621d54a37c3e047df223a/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-05_17:01:20_6bc517ccd4a3bcda4d0621d54a37c3e047df223a/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index af62b6d475467dd355c3961e8b120b0e58742a09..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_17:01:20_6bc517ccd4a3bcda4d0621d54a37c3e047df223a/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-05 18:50:17,530][benchmark][INFO] - Configuring inference benchmark -[2023-09-05 18:50:17,531][benchmark][INFO] - + Setting seed(42) -[2023-09-05 18:50:18,931][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-05 18:50:18,932][backend][INFO] - Configuring pytorch backend -[2023-09-05 18:50:18,932][backend][INFO] - + Checking initial device isolation -[2023-09-05 18:50:18,932][backend][INFO] - + Checking contineous device isolation -[2023-09-05 18:50:18,932][pytorch][INFO] - + Disabling gradients -[2023-09-05 18:50:18,932][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-05 18:50:19,581][pytorch][INFO] - + Turning on eval mode -[2023-09-05 18:50:19,581][inference][INFO] - Running inference benchmark -[2023-09-05 18:50:19,922][inference][INFO] - + Tracking forward pass peak memory -[2023-09-05 18:50:19,974][inference][INFO] - + Forward pass peak memory: 469.16403199999996 (MB) -[2023-09-05 18:50:19,976][inference][INFO] - + Warming up the 
forward pass -[2023-09-05 18:50:20,009][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-05 18:50:25,054][inference][INFO] - + Forward pass latency: 3.83e-03 (s) -[2023-09-05 18:50:25,056][inference][INFO] - + Forward pass throughput: 261.00 (samples/s) -[2023-09-05 18:50:25,057][inference][INFO] - + Warming up the generation pass -[2023-09-05 18:50:25,647][inference][INFO] - + Tracking generation latency and throughput -[2023-09-05 18:50:30,764][inference][INFO] - + Generation pass latency: 5.12e-01 (s) -[2023-09-05 18:50:30,765][inference][INFO] - + Generation pass throughput: 195.00 (tokens/s) -[2023-09-05 18:50:30,765][inference][INFO] - Saving inference results -[2023-09-05 18:50:30,777][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-05_17:26:06_8d518013efbd10c178dd0dba0f9ba93229e2e78a/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-05_17:26:06_8d518013efbd10c178dd0dba0f9ba93229e2e78a/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index d75847bed7772db4647c7b58e851f1868b48e619..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_17:26:06_8d518013efbd10c178dd0dba0f9ba93229e2e78a/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_17:26:06_8d518013efbd10c178dd0dba0f9ba93229e2e78a/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-05_17:26:06_8d518013efbd10c178dd0dba0f9ba93229e2e78a/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index fd400c19ae711b509cac4686a0467f3acc6dbb08..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_17:26:06_8d518013efbd10c178dd0dba0f9ba93229e2e78a/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-05_17:26:06_8d518013efbd10c178dd0dba0f9ba93229e2e78a/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-09-05_17:26:06_8d518013efbd10c178dd0dba0f9ba93229e2e78a/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-05_17:26:06_8d518013efbd10c178dd0dba0f9ba93229e2e78a/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_17:26:06_8d518013efbd10c178dd0dba0f9ba93229e2e78a/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-05_17:26:06_8d518013efbd10c178dd0dba0f9ba93229e2e78a/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-05_17:26:06_8d518013efbd10c178dd0dba0f9ba93229e2e78a/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 36a53864c4e26f9e38905bad2498ae02dc1b767f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_17:26:06_8d518013efbd10c178dd0dba0f9ba93229e2e78a/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_17:26:06_8d518013efbd10c178dd0dba0f9ba93229e2e78a/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-05_17:26:06_8d518013efbd10c178dd0dba0f9ba93229e2e78a/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 81ad2e55df25ac152dcbfc7441afd1d4e319d651..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_17:26:06_8d518013efbd10c178dd0dba0f9ba93229e2e78a/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.583552,0.00314,318.0 diff --git a/raw_results/2023-09-05_17:26:06_8d518013efbd10c178dd0dba0f9ba93229e2e78a/pytorch_bert_inference/0/main.log b/raw_results/2023-09-05_17:26:06_8d518013efbd10c178dd0dba0f9ba93229e2e78a/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
b36a71d1a35384e350ccd2250060390caaaec5de..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_17:26:06_8d518013efbd10c178dd0dba0f9ba93229e2e78a/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-05 18:51:45,971][benchmark][INFO] - Configuring inference benchmark -[2023-09-05 18:51:45,972][benchmark][INFO] - + Setting seed(42) -[2023-09-05 18:51:47,203][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-05 18:51:47,203][backend][INFO] - Configuring pytorch backend -[2023-09-05 18:51:47,203][backend][INFO] - + Checking initial device isolation -[2023-09-05 18:51:47,204][backend][INFO] - + Checking contineous device isolation -[2023-09-05 18:51:47,204][pytorch][INFO] - + Disabling gradients -[2023-09-05 18:51:47,204][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-05 18:51:47,812][pytorch][INFO] - + Turning on eval mode -[2023-09-05 18:51:47,813][inference][INFO] - Running inference benchmark -[2023-09-05 18:51:47,931][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 18:51:47,933][inference][INFO] - + Tracking forward pass peak memory -[2023-09-05 18:51:47,994][inference][INFO] - + Forward pass peak memory: 466.583552 (MB) -[2023-09-05 18:51:47,996][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 18:51:47,997][inference][INFO] - + Warming up the forward pass -[2023-09-05 18:51:48,029][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-05 18:51:53,082][inference][INFO] - + Forward pass latency: 3.14e-03 (s) -[2023-09-05 18:51:53,084][inference][INFO] - + Forward pass throughput: 318.00 (samples/s) -[2023-09-05 18:51:53,084][inference][INFO] - Saving inference results -[2023-09-05 18:51:53,094][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-05_17:26:06_8d518013efbd10c178dd0dba0f9ba93229e2e78a/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-05_17:26:06_8d518013efbd10c178dd0dba0f9ba93229e2e78a/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 344ec2f935a31a19a62e706f12f4bb8e09c4c386..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_17:26:06_8d518013efbd10c178dd0dba0f9ba93229e2e78a/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_17:26:06_8d518013efbd10c178dd0dba0f9ba93229e2e78a/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-05_17:26:06_8d518013efbd10c178dd0dba0f9ba93229e2e78a/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 8386ac421fd1594573f4d4575b35384958f06e04..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_17:26:06_8d518013efbd10c178dd0dba0f9ba93229e2e78a/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-05_17:26:06_8d518013efbd10c178dd0dba0f9ba93229e2e78a/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-05_17:26:06_8d518013efbd10c178dd0dba0f9ba93229e2e78a/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-09-05_17:26:06_8d518013efbd10c178dd0dba0f9ba93229e2e78a/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_17:26:06_8d518013efbd10c178dd0dba0f9ba93229e2e78a/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-05_17:26:06_8d518013efbd10c178dd0dba0f9ba93229e2e78a/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-05_17:26:06_8d518013efbd10c178dd0dba0f9ba93229e2e78a/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 9af0e66383a2862a3d036ea4917e3bacabb881ae..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_17:26:06_8d518013efbd10c178dd0dba0f9ba93229e2e78a/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_17:26:06_8d518013efbd10c178dd0dba0f9ba93229e2e78a/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-05_17:26:06_8d518013efbd10c178dd0dba0f9ba93229e2e78a/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index d732f03f5301c0d44fa9193c9df180b1f72dab09..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_17:26:06_8d518013efbd10c178dd0dba0f9ba93229e2e78a/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.63622399999997,0.00347,1150.0 diff --git a/raw_results/2023-09-05_17:26:06_8d518013efbd10c178dd0dba0f9ba93229e2e78a/pytorch_bert_inference/1/main.log b/raw_results/2023-09-05_17:26:06_8d518013efbd10c178dd0dba0f9ba93229e2e78a/pytorch_bert_inference/1/main.log deleted file mode 100644 index 770da033ceeb263e906a28f15806e11645c7be8d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_17:26:06_8d518013efbd10c178dd0dba0f9ba93229e2e78a/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-05 18:51:53,466][benchmark][INFO] - Configuring inference benchmark -[2023-09-05 18:51:53,467][benchmark][INFO] - + Setting seed(42) -[2023-09-05 18:51:53,917][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-05 18:51:53,917][backend][INFO] - Configuring pytorch backend -[2023-09-05 18:51:53,917][backend][INFO] - + Checking initial device isolation -[2023-09-05 18:51:53,918][backend][INFO] - + Checking contineous device isolation -[2023-09-05 18:51:53,918][pytorch][INFO] - + Disabling gradients -[2023-09-05 18:51:53,918][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-05 18:51:54,034][pytorch][INFO] - + Turning on eval mode -[2023-09-05 18:51:54,034][inference][INFO] - Running inference benchmark -[2023-09-05 18:51:54,159][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 18:51:54,160][inference][INFO] - + Tracking forward 
pass peak memory -[2023-09-05 18:51:54,205][inference][INFO] - + Forward pass peak memory: 467.63622399999997 (MB) -[2023-09-05 18:51:54,206][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 18:51:54,208][inference][INFO] - + Warming up the forward pass -[2023-09-05 18:51:54,244][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-05 18:51:59,290][inference][INFO] - + Forward pass latency: 3.47e-03 (s) -[2023-09-05 18:51:59,291][inference][INFO] - + Forward pass throughput: 1150.00 (samples/s) -[2023-09-05 18:51:59,291][inference][INFO] - Saving inference results -[2023-09-05 18:51:59,298][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-05_17:26:06_8d518013efbd10c178dd0dba0f9ba93229e2e78a/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-05_17:26:06_8d518013efbd10c178dd0dba0f9ba93229e2e78a/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 7e32a35f367f532a16d65c94e459c4f30c9188c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_17:26:06_8d518013efbd10c178dd0dba0f9ba93229e2e78a/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_17:26:06_8d518013efbd10c178dd0dba0f9ba93229e2e78a/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-05_17:26:06_8d518013efbd10c178dd0dba0f9ba93229e2e78a/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index a1ca39d561fdf24ec56df18a80a99135bf78c621..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_17:26:06_8d518013efbd10c178dd0dba0f9ba93229e2e78a/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - 
launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-05_17:26:06_8d518013efbd10c178dd0dba0f9ba93229e2e78a/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-05_17:26:06_8d518013efbd10c178dd0dba0f9ba93229e2e78a/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-09-05_17:26:06_8d518013efbd10c178dd0dba0f9ba93229e2e78a/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_17:26:06_8d518013efbd10c178dd0dba0f9ba93229e2e78a/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-05_17:26:06_8d518013efbd10c178dd0dba0f9ba93229e2e78a/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-05_17:26:06_8d518013efbd10c178dd0dba0f9ba93229e2e78a/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 5cf5f1ff54ffd6523449b409066f7697ce8772db..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_17:26:06_8d518013efbd10c178dd0dba0f9ba93229e2e78a/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_17:26:06_8d518013efbd10c178dd0dba0f9ba93229e2e78a/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-05_17:26:06_8d518013efbd10c178dd0dba0f9ba93229e2e78a/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 2b604a42c173b711d25fc77b5f82c08e49333a1b..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_17:26:06_8d518013efbd10c178dd0dba0f9ba93229e2e78a/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.409792,0.00387,258.0,0.487,205.0 diff --git a/raw_results/2023-09-05_17:26:06_8d518013efbd10c178dd0dba0f9ba93229e2e78a/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-05_17:26:06_8d518013efbd10c178dd0dba0f9ba93229e2e78a/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 9edcc1e3f548ade8d13c10fe2287c7c32a884df5..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-09-05_17:26:06_8d518013efbd10c178dd0dba0f9ba93229e2e78a/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-05 18:52:04,098][benchmark][INFO] - Configuring inference benchmark -[2023-09-05 18:52:04,100][benchmark][INFO] - + Setting seed(42) -[2023-09-05 18:52:05,484][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-05 18:52:05,484][backend][INFO] - Configuring pytorch backend -[2023-09-05 18:52:05,484][backend][INFO] - + Checking initial device isolation -[2023-09-05 18:52:05,485][backend][INFO] - + Checking contineous device isolation -[2023-09-05 18:52:05,485][pytorch][INFO] - + Disabling gradients -[2023-09-05 18:52:05,485][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-05 18:52:06,132][pytorch][INFO] - + Turning on eval mode -[2023-09-05 18:52:06,132][inference][INFO] - Running inference benchmark -[2023-09-05 18:52:06,324][inference][INFO] - + Tracking forward pass peak memory -[2023-09-05 18:52:06,371][inference][INFO] - + Forward pass peak memory: 469.409792 (MB) -[2023-09-05 18:52:06,372][inference][INFO] - + Warming up the forward pass -[2023-09-05 18:52:06,409][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-05 18:52:11,455][inference][INFO] - + Forward pass latency: 3.87e-03 (s) -[2023-09-05 18:52:11,457][inference][INFO] - + Forward pass throughput: 258.00 (samples/s) -[2023-09-05 18:52:11,457][inference][INFO] - + Warming up the generation pass -[2023-09-05 18:52:12,046][inference][INFO] - + Tracking generation latency and throughput -[2023-09-05 18:52:17,402][inference][INFO] - + Generation pass latency: 4.87e-01 (s) -[2023-09-05 18:52:17,403][inference][INFO] - + Generation pass throughput: 205.00 (tokens/s) -[2023-09-05 18:52:17,403][inference][INFO] - Saving inference results -[2023-09-05 18:52:17,415][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-05_17:50:08_6206f599e1f45b619f72f9d194929e545549416f/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-05_17:50:08_6206f599e1f45b619f72f9d194929e545549416f/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index d75847bed7772db4647c7b58e851f1868b48e619..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_17:50:08_6206f599e1f45b619f72f9d194929e545549416f/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_17:50:08_6206f599e1f45b619f72f9d194929e545549416f/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-05_17:50:08_6206f599e1f45b619f72f9d194929e545549416f/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 83e152d48c2db748c142db2475b2b17d65c4fe4b..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_17:50:08_6206f599e1f45b619f72f9d194929e545549416f/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-05_17:50:08_6206f599e1f45b619f72f9d194929e545549416f/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-05_17:50:08_6206f599e1f45b619f72f9d194929e545549416f/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-05_17:50:08_6206f599e1f45b619f72f9d194929e545549416f/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_17:50:08_6206f599e1f45b619f72f9d194929e545549416f/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-05_17:50:08_6206f599e1f45b619f72f9d194929e545549416f/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-05_17:50:08_6206f599e1f45b619f72f9d194929e545549416f/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 36a53864c4e26f9e38905bad2498ae02dc1b767f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_17:50:08_6206f599e1f45b619f72f9d194929e545549416f/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_17:50:08_6206f599e1f45b619f72f9d194929e545549416f/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-05_17:50:08_6206f599e1f45b619f72f9d194929e545549416f/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index f8a2f66f16f1cdc5ff153861dd6201810ad2a443..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_17:50:08_6206f599e1f45b619f72f9d194929e545549416f/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.65318399999995,0.00315,317.0 diff --git a/raw_results/2023-09-05_17:50:08_6206f599e1f45b619f72f9d194929e545549416f/pytorch_bert_inference/0/main.log b/raw_results/2023-09-05_17:50:08_6206f599e1f45b619f72f9d194929e545549416f/pytorch_bert_inference/0/main.log deleted file mode 100644 index 6921ea1b32a4498bbf681b6e9ef4ae77c2bb602c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_17:50:08_6206f599e1f45b619f72f9d194929e545549416f/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-05 18:53:26,956][benchmark][INFO] - Configuring inference benchmark -[2023-09-05 18:53:26,957][benchmark][INFO] - + Setting seed(42) -[2023-09-05 18:53:28,211][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-05 18:53:28,212][backend][INFO] - Configuring pytorch backend -[2023-09-05 18:53:28,212][backend][INFO] - + Checking initial device isolation -[2023-09-05 18:53:28,212][backend][INFO] - + Checking contineous device isolation -[2023-09-05 18:53:28,212][pytorch][INFO] - + Disabling gradients -[2023-09-05 18:53:28,212][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-05 18:53:28,836][pytorch][INFO] - + Turning on eval mode -[2023-09-05 18:53:28,836][inference][INFO] - Running inference benchmark -[2023-09-05 18:53:28,960][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 18:53:28,961][inference][INFO] - + Tracking forward 
pass peak memory -[2023-09-05 18:53:29,023][inference][INFO] - + Forward pass peak memory: 466.65318399999995 (MB) -[2023-09-05 18:53:29,024][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 18:53:29,026][inference][INFO] - + Warming up the forward pass -[2023-09-05 18:53:29,058][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-05 18:53:34,113][inference][INFO] - + Forward pass latency: 3.15e-03 (s) -[2023-09-05 18:53:34,114][inference][INFO] - + Forward pass throughput: 317.00 (samples/s) -[2023-09-05 18:53:34,115][inference][INFO] - Saving inference results -[2023-09-05 18:53:34,128][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-05_17:50:08_6206f599e1f45b619f72f9d194929e545549416f/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-05_17:50:08_6206f599e1f45b619f72f9d194929e545549416f/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 344ec2f935a31a19a62e706f12f4bb8e09c4c386..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_17:50:08_6206f599e1f45b619f72f9d194929e545549416f/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_17:50:08_6206f599e1f45b619f72f9d194929e545549416f/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-05_17:50:08_6206f599e1f45b619f72f9d194929e545549416f/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 9a4ee4221e71844f971c97fb82e8be645e7b5ef6..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_17:50:08_6206f599e1f45b619f72f9d194929e545549416f/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} 
- launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-05_17:50:08_6206f599e1f45b619f72f9d194929e545549416f/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-09-05_17:50:08_6206f599e1f45b619f72f9d194929e545549416f/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-09-05_17:50:08_6206f599e1f45b619f72f9d194929e545549416f/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_17:50:08_6206f599e1f45b619f72f9d194929e545549416f/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-05_17:50:08_6206f599e1f45b619f72f9d194929e545549416f/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-05_17:50:08_6206f599e1f45b619f72f9d194929e545549416f/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 9af0e66383a2862a3d036ea4917e3bacabb881ae..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_17:50:08_6206f599e1f45b619f72f9d194929e545549416f/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_17:50:08_6206f599e1f45b619f72f9d194929e545549416f/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-05_17:50:08_6206f599e1f45b619f72f9d194929e545549416f/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 3c104cee0cdec686cb09d6cfc0c7ad59f10e91cd..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_17:50:08_6206f599e1f45b619f72f9d194929e545549416f/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.70176,0.00349,1150.0 diff --git a/raw_results/2023-09-05_17:50:08_6206f599e1f45b619f72f9d194929e545549416f/pytorch_bert_inference/1/main.log b/raw_results/2023-09-05_17:50:08_6206f599e1f45b619f72f9d194929e545549416f/pytorch_bert_inference/1/main.log deleted file mode 100644 index 
0ada45f9e0951cd5745e15218fa1c1befbde5351..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_17:50:08_6206f599e1f45b619f72f9d194929e545549416f/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-05 18:53:34,512][benchmark][INFO] - Configuring inference benchmark -[2023-09-05 18:53:34,513][benchmark][INFO] - + Setting seed(42) -[2023-09-05 18:53:34,990][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-05 18:53:34,990][backend][INFO] - Configuring pytorch backend -[2023-09-05 18:53:34,990][backend][INFO] - + Checking initial device isolation -[2023-09-05 18:53:34,990][backend][INFO] - + Checking contineous device isolation -[2023-09-05 18:53:34,991][pytorch][INFO] - + Disabling gradients -[2023-09-05 18:53:34,991][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-05 18:53:35,113][pytorch][INFO] - + Turning on eval mode -[2023-09-05 18:53:35,114][inference][INFO] - Running inference benchmark -[2023-09-05 18:53:35,240][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 18:53:35,242][inference][INFO] - + Tracking forward pass peak memory -[2023-09-05 18:53:35,288][inference][INFO] - + Forward pass peak memory: 467.70176 (MB) -[2023-09-05 18:53:35,289][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 18:53:35,290][inference][INFO] - + Warming up the forward pass -[2023-09-05 18:53:35,327][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-05 18:53:40,374][inference][INFO] - + Forward pass latency: 3.49e-03 (s) -[2023-09-05 18:53:40,375][inference][INFO] - + Forward pass throughput: 1150.00 (samples/s) -[2023-09-05 18:53:40,375][inference][INFO] - Saving inference results -[2023-09-05 18:53:40,383][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-05_17:50:08_6206f599e1f45b619f72f9d194929e545549416f/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-05_17:50:08_6206f599e1f45b619f72f9d194929e545549416f/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 7e32a35f367f532a16d65c94e459c4f30c9188c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_17:50:08_6206f599e1f45b619f72f9d194929e545549416f/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_17:50:08_6206f599e1f45b619f72f9d194929e545549416f/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-05_17:50:08_6206f599e1f45b619f72f9d194929e545549416f/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 165c3538004f0bba3024eb954a97a3f7545e76c7..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_17:50:08_6206f599e1f45b619f72f9d194929e545549416f/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-05_17:50:08_6206f599e1f45b619f72f9d194929e545549416f/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-05_17:50:08_6206f599e1f45b619f72f9d194929e545549416f/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-09-05_17:50:08_6206f599e1f45b619f72f9d194929e545549416f/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_17:50:08_6206f599e1f45b619f72f9d194929e545549416f/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-05_17:50:08_6206f599e1f45b619f72f9d194929e545549416f/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-05_17:50:08_6206f599e1f45b619f72f9d194929e545549416f/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 5cf5f1ff54ffd6523449b409066f7697ce8772db..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_17:50:08_6206f599e1f45b619f72f9d194929e545549416f/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_17:50:08_6206f599e1f45b619f72f9d194929e545549416f/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-05_17:50:08_6206f599e1f45b619f72f9d194929e545549416f/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 58b8a0de593da833428381e3a186de61f3a0cb45..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_17:50:08_6206f599e1f45b619f72f9d194929e545549416f/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.368832,0.00397,252.0,0.502,199.0 diff --git a/raw_results/2023-09-05_17:50:08_6206f599e1f45b619f72f9d194929e545549416f/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-05_17:50:08_6206f599e1f45b619f72f9d194929e545549416f/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 130304c7f2d6d3642e009b0c901a78757a93ed1e..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_17:50:08_6206f599e1f45b619f72f9d194929e545549416f/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-05 18:53:45,278][benchmark][INFO] - Configuring inference benchmark -[2023-09-05 18:53:45,279][benchmark][INFO] - + Setting seed(42) -[2023-09-05 18:53:46,718][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-05 18:53:46,718][backend][INFO] - Configuring pytorch backend -[2023-09-05 18:53:46,718][backend][INFO] - + Checking initial device isolation -[2023-09-05 18:53:46,719][backend][INFO] - + Checking contineous device isolation -[2023-09-05 18:53:46,719][pytorch][INFO] - + Disabling gradients -[2023-09-05 18:53:46,719][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-05 18:53:47,362][pytorch][INFO] - + Turning on eval mode -[2023-09-05 18:53:47,363][inference][INFO] - Running inference benchmark -[2023-09-05 18:53:47,567][inference][INFO] - + Tracking forward pass peak memory -[2023-09-05 18:53:47,616][inference][INFO] - + Forward pass peak memory: 469.368832 (MB) -[2023-09-05 18:53:47,618][inference][INFO] - + Warming up the forward pass 
-[2023-09-05 18:53:47,663][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-05 18:53:52,715][inference][INFO] - + Forward pass latency: 3.97e-03 (s) -[2023-09-05 18:53:52,716][inference][INFO] - + Forward pass throughput: 252.00 (samples/s) -[2023-09-05 18:53:52,717][inference][INFO] - + Warming up the generation pass -[2023-09-05 18:53:53,311][inference][INFO] - + Tracking generation latency and throughput -[2023-09-05 18:53:58,334][inference][INFO] - + Generation pass latency: 5.02e-01 (s) -[2023-09-05 18:53:58,335][inference][INFO] - + Generation pass throughput: 199.00 (tokens/s) -[2023-09-05 18:53:58,336][inference][INFO] - Saving inference results -[2023-09-05 18:53:58,348][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-05_18:16:00_d0354e5e86842b757cec1ecb7de314a1f2421c1e/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-05_18:16:00_d0354e5e86842b757cec1ecb7de314a1f2421c1e/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index d75847bed7772db4647c7b58e851f1868b48e619..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_18:16:00_d0354e5e86842b757cec1ecb7de314a1f2421c1e/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_18:16:00_d0354e5e86842b757cec1ecb7de314a1f2421c1e/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-05_18:16:00_d0354e5e86842b757cec1ecb7de314a1f2421c1e/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index d56c963abfdbe5d37177a203b024b11d58b31fb2..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_18:16:00_d0354e5e86842b757cec1ecb7de314a1f2421c1e/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-05_18:16:00_d0354e5e86842b757cec1ecb7de314a1f2421c1e/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-09-05_18:16:00_d0354e5e86842b757cec1ecb7de314a1f2421c1e/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-05_18:16:00_d0354e5e86842b757cec1ecb7de314a1f2421c1e/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_18:16:00_d0354e5e86842b757cec1ecb7de314a1f2421c1e/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-05_18:16:00_d0354e5e86842b757cec1ecb7de314a1f2421c1e/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-05_18:16:00_d0354e5e86842b757cec1ecb7de314a1f2421c1e/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 36a53864c4e26f9e38905bad2498ae02dc1b767f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_18:16:00_d0354e5e86842b757cec1ecb7de314a1f2421c1e/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_18:16:00_d0354e5e86842b757cec1ecb7de314a1f2421c1e/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-05_18:16:00_d0354e5e86842b757cec1ecb7de314a1f2421c1e/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index c26345be4891581cb484225073d84812bd9cf605..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_18:16:00_d0354e5e86842b757cec1ecb7de314a1f2421c1e/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.956288,0.00378,265.0 diff --git a/raw_results/2023-09-05_18:16:00_d0354e5e86842b757cec1ecb7de314a1f2421c1e/pytorch_bert_inference/0/main.log b/raw_results/2023-09-05_18:16:00_d0354e5e86842b757cec1ecb7de314a1f2421c1e/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
e01e7cccdedc95b1fef6fc1c2a7c45adf8ff551b..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_18:16:00_d0354e5e86842b757cec1ecb7de314a1f2421c1e/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-05 18:55:08,208][benchmark][INFO] - Configuring inference benchmark -[2023-09-05 18:55:08,210][benchmark][INFO] - + Setting seed(42) -[2023-09-05 18:55:09,443][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-05 18:55:09,444][backend][INFO] - Configuring pytorch backend -[2023-09-05 18:55:09,444][backend][INFO] - + Checking initial device isolation -[2023-09-05 18:55:09,444][backend][INFO] - + Checking contineous device isolation -[2023-09-05 18:55:09,444][pytorch][INFO] - + Disabling gradients -[2023-09-05 18:55:09,445][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-05 18:55:10,059][pytorch][INFO] - + Turning on eval mode -[2023-09-05 18:55:10,060][inference][INFO] - Running inference benchmark -[2023-09-05 18:55:10,201][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 18:55:10,202][inference][INFO] - + Tracking forward pass peak memory -[2023-09-05 18:55:10,267][inference][INFO] - + Forward pass peak memory: 466.956288 (MB) -[2023-09-05 18:55:10,268][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 18:55:10,270][inference][INFO] - + Warming up the forward pass -[2023-09-05 18:55:10,309][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-05 18:55:15,355][inference][INFO] - + Forward pass latency: 3.78e-03 (s) -[2023-09-05 18:55:15,356][inference][INFO] - + Forward pass throughput: 265.00 (samples/s) -[2023-09-05 18:55:15,356][inference][INFO] - Saving inference results -[2023-09-05 18:55:15,366][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-05_18:16:00_d0354e5e86842b757cec1ecb7de314a1f2421c1e/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-05_18:16:00_d0354e5e86842b757cec1ecb7de314a1f2421c1e/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 344ec2f935a31a19a62e706f12f4bb8e09c4c386..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_18:16:00_d0354e5e86842b757cec1ecb7de314a1f2421c1e/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_18:16:00_d0354e5e86842b757cec1ecb7de314a1f2421c1e/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-05_18:16:00_d0354e5e86842b757cec1ecb7de314a1f2421c1e/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 83d4fc310316269994f8fcb68e8b972522b05106..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_18:16:00_d0354e5e86842b757cec1ecb7de314a1f2421c1e/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
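The sweeper block above lists benchmark.input_shapes.batch_size: 1,4, which Hydra's BasicSweeper expands into one job per value; the per-job overrides.yaml files and the numbered subdirs (0 and 1) throughout this diff are the result. A rough sketch of that expansion, simplified relative to Hydra's actual implementation:

    from itertools import product

    def expand_sweep(params):
        # Split each comma-separated sweep value and take the cross product,
        # yielding one override list per job (simplified vs. BasicSweeper).
        keys = list(params)
        values = [str(params[k]).split(",") for k in keys]
        return [[f"{k}={v}" for k, v in zip(keys, combo)]
                for combo in product(*values)]

    # -> [['benchmark.input_shapes.batch_size=1'],
    #     ['benchmark.input_shapes.batch_size=4']]
    # matching the overrides.yaml of job subdirs 0 and 1 in this diff.
    print(expand_sweep({"benchmark.input_shapes.batch_size": "1,4"}))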
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-05_18:16:00_d0354e5e86842b757cec1ecb7de314a1f2421c1e/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-05_18:16:00_d0354e5e86842b757cec1ecb7de314a1f2421c1e/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-09-05_18:16:00_d0354e5e86842b757cec1ecb7de314a1f2421c1e/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_18:16:00_d0354e5e86842b757cec1ecb7de314a1f2421c1e/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-05_18:16:00_d0354e5e86842b757cec1ecb7de314a1f2421c1e/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-05_18:16:00_d0354e5e86842b757cec1ecb7de314a1f2421c1e/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 9af0e66383a2862a3d036ea4917e3bacabb881ae..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_18:16:00_d0354e5e86842b757cec1ecb7de314a1f2421c1e/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_18:16:00_d0354e5e86842b757cec1ecb7de314a1f2421c1e/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-05_18:16:00_d0354e5e86842b757cec1ecb7de314a1f2421c1e/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index b76036866d7c0e9e4f52e545322873a9381bce4f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_18:16:00_d0354e5e86842b757cec1ecb7de314a1f2421c1e/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.91884799999997,0.00419,955.0 diff --git a/raw_results/2023-09-05_18:16:00_d0354e5e86842b757cec1ecb7de314a1f2421c1e/pytorch_bert_inference/1/main.log b/raw_results/2023-09-05_18:16:00_d0354e5e86842b757cec1ecb7de314a1f2421c1e/pytorch_bert_inference/1/main.log deleted file mode 100644 index 110db494556fe30ee40cc322b1c0ad125d309072..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_18:16:00_d0354e5e86842b757cec1ecb7de314a1f2421c1e/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-05 18:55:15,743][benchmark][INFO] - Configuring inference benchmark -[2023-09-05 18:55:15,744][benchmark][INFO] - + Setting seed(42) -[2023-09-05 18:55:16,199][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-05 18:55:16,199][backend][INFO] - Configuring pytorch backend -[2023-09-05 18:55:16,199][backend][INFO] - + Checking initial device isolation -[2023-09-05 18:55:16,199][backend][INFO] - + Checking contineous device isolation -[2023-09-05 18:55:16,199][pytorch][INFO] - + Disabling gradients -[2023-09-05 18:55:16,200][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-05 18:55:16,322][pytorch][INFO] - + Turning on eval mode -[2023-09-05 18:55:16,323][inference][INFO] - Running inference benchmark -[2023-09-05 18:55:16,454][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 18:55:16,455][inference][INFO] - + Tracking forward 
pass peak memory -[2023-09-05 18:55:16,500][inference][INFO] - + Forward pass peak memory: 467.91884799999997 (MB) -[2023-09-05 18:55:16,501][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 18:55:16,503][inference][INFO] - + Warming up the forward pass -[2023-09-05 18:55:16,546][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-05 18:55:21,589][inference][INFO] - + Forward pass latency: 4.19e-03 (s) -[2023-09-05 18:55:21,590][inference][INFO] - + Forward pass throughput: 955.00 (samples/s) -[2023-09-05 18:55:21,590][inference][INFO] - Saving inference results -[2023-09-05 18:55:21,598][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-05_18:16:00_d0354e5e86842b757cec1ecb7de314a1f2421c1e/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-05_18:16:00_d0354e5e86842b757cec1ecb7de314a1f2421c1e/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 7e32a35f367f532a16d65c94e459c4f30c9188c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_18:16:00_d0354e5e86842b757cec1ecb7de314a1f2421c1e/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_18:16:00_d0354e5e86842b757cec1ecb7de314a1f2421c1e/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-05_18:16:00_d0354e5e86842b757cec1ecb7de314a1f2421c1e/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 9fbb7def73fe601ae9ec40a9b9d96dd7dba9a1b6..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_18:16:00_d0354e5e86842b757cec1ecb7de314a1f2421c1e/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - 
launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-05_18:16:00_d0354e5e86842b757cec1ecb7de314a1f2421c1e/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-05_18:16:00_d0354e5e86842b757cec1ecb7de314a1f2421c1e/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-09-05_18:16:00_d0354e5e86842b757cec1ecb7de314a1f2421c1e/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_18:16:00_d0354e5e86842b757cec1ecb7de314a1f2421c1e/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-05_18:16:00_d0354e5e86842b757cec1ecb7de314a1f2421c1e/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-05_18:16:00_d0354e5e86842b757cec1ecb7de314a1f2421c1e/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 5cf5f1ff54ffd6523449b409066f7697ce8772db..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_18:16:00_d0354e5e86842b757cec1ecb7de314a1f2421c1e/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_18:16:00_d0354e5e86842b757cec1ecb7de314a1f2421c1e/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-05_18:16:00_d0354e5e86842b757cec1ecb7de314a1f2421c1e/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index df37b23bbfd15ce347622a5441f98960ef3f0e3c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_18:16:00_d0354e5e86842b757cec1ecb7de314a1f2421c1e/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.1968,0.00317,315.0,0.522,192.0 diff --git a/raw_results/2023-09-05_18:16:00_d0354e5e86842b757cec1ecb7de314a1f2421c1e/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-05_18:16:00_d0354e5e86842b757cec1ecb7de314a1f2421c1e/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 9899705c60517ec9522656329d128ad0c5f7d82d..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-09-05_18:16:00_d0354e5e86842b757cec1ecb7de314a1f2421c1e/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-05 18:55:26,379][benchmark][INFO] - Configuring inference benchmark -[2023-09-05 18:55:26,380][benchmark][INFO] - + Setting seed(42) -[2023-09-05 18:55:27,768][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-05 18:55:27,768][backend][INFO] - Configuring pytorch backend -[2023-09-05 18:55:27,769][backend][INFO] - + Checking initial device isolation -[2023-09-05 18:55:27,769][backend][INFO] - + Checking contineous device isolation -[2023-09-05 18:55:27,769][pytorch][INFO] - + Disabling gradients -[2023-09-05 18:55:27,769][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-05 18:55:28,408][pytorch][INFO] - + Turning on eval mode -[2023-09-05 18:55:28,409][inference][INFO] - Running inference benchmark -[2023-09-05 18:55:28,608][inference][INFO] - + Tracking forward pass peak memory -[2023-09-05 18:55:28,657][inference][INFO] - + Forward pass peak memory: 469.1968 (MB) -[2023-09-05 18:55:28,659][inference][INFO] - + Warming up the forward pass -[2023-09-05 18:55:28,692][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-05 18:55:33,744][inference][INFO] - + Forward pass latency: 3.17e-03 (s) -[2023-09-05 18:55:33,746][inference][INFO] - + Forward pass throughput: 315.00 (samples/s) -[2023-09-05 18:55:33,746][inference][INFO] - + Warming up the generation pass -[2023-09-05 18:55:34,243][inference][INFO] - + Tracking generation latency and throughput -[2023-09-05 18:55:39,464][inference][INFO] - + Generation pass latency: 5.22e-01 (s) -[2023-09-05 18:55:39,464][inference][INFO] - + Generation pass throughput: 192.00 (tokens/s) -[2023-09-05 18:55:39,464][inference][INFO] - Saving inference results -[2023-09-05 18:55:39,476][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-05_19:21:33_4fa0aff21ee083d0197a898cdf17ff476fae2ac3/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-05_19:21:33_4fa0aff21ee083d0197a898cdf17ff476fae2ac3/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index d75847bed7772db4647c7b58e851f1868b48e619..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_19:21:33_4fa0aff21ee083d0197a898cdf17ff476fae2ac3/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 
16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_19:21:33_4fa0aff21ee083d0197a898cdf17ff476fae2ac3/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-05_19:21:33_4fa0aff21ee083d0197a898cdf17ff476fae2ac3/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 55f3368237350b093ddbbccf09b32249104b2477..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_19:21:33_4fa0aff21ee083d0197a898cdf17ff476fae2ac3/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
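Each inference_results.csv in this diff can be cross-checked against the definitions implied by the logs: forward throughput is batch_size / forward latency, and generation throughput is batch_size * new_tokens / generation latency (for example, 100 / 0.522 s ≈ 192 tokens/s in the gpt2 run above). A small validation sketch; the column names are copied verbatim from the CSVs, while the helper itself is hypothetical:

    import csv

    def check_row(path, batch_size=1, new_tokens=100):
        # Recompute throughputs from latencies and compare with the reported
        # columns; column names are taken from the CSVs in this diff.
        with open(path) as f:
            row = next(csv.DictReader(f))
        fwd = batch_size / float(row["forward.latency(s)"])
        print("forward:", fwd, "vs reported",
              row["forward.throughput(samples/s)"])
        # Only text-generation runs have the generate.* columns.
        if "generate.latency(s)" in row:
            gen = batch_size * new_tokens / float(row["generate.latency(s)"])
            print("generate:", gen, "vs reported",
                  row["generate.throughput(tokens/s)"])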
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-05_19:21:33_4fa0aff21ee083d0197a898cdf17ff476fae2ac3/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-05_19:21:33_4fa0aff21ee083d0197a898cdf17ff476fae2ac3/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-05_19:21:33_4fa0aff21ee083d0197a898cdf17ff476fae2ac3/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_19:21:33_4fa0aff21ee083d0197a898cdf17ff476fae2ac3/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-05_19:21:33_4fa0aff21ee083d0197a898cdf17ff476fae2ac3/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-05_19:21:33_4fa0aff21ee083d0197a898cdf17ff476fae2ac3/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 36a53864c4e26f9e38905bad2498ae02dc1b767f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_19:21:33_4fa0aff21ee083d0197a898cdf17ff476fae2ac3/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_19:21:33_4fa0aff21ee083d0197a898cdf17ff476fae2ac3/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-05_19:21:33_4fa0aff21ee083d0197a898cdf17ff476fae2ac3/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 05f36034b2a4bff18e638033b48ee755617265aa..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_19:21:33_4fa0aff21ee083d0197a898cdf17ff476fae2ac3/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.845696,0.00319,313.0 diff --git a/raw_results/2023-09-05_19:21:33_4fa0aff21ee083d0197a898cdf17ff476fae2ac3/pytorch_bert_inference/0/main.log b/raw_results/2023-09-05_19:21:33_4fa0aff21ee083d0197a898cdf17ff476fae2ac3/pytorch_bert_inference/0/main.log deleted file mode 100644 index 9c4b23aede676cd739f44d5d441e7798c02cca28..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_19:21:33_4fa0aff21ee083d0197a898cdf17ff476fae2ac3/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-05 20:50:18,416][benchmark][INFO] - Configuring inference benchmark -[2023-09-05 20:50:18,417][benchmark][INFO] - + Setting seed(42) -[2023-09-05 20:50:19,631][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-05 20:50:19,631][backend][INFO] - Configuring pytorch backend -[2023-09-05 20:50:19,631][backend][INFO] - + Checking initial device isolation -[2023-09-05 20:50:19,631][backend][INFO] - + Checking contineous device isolation -[2023-09-05 20:50:19,632][pytorch][INFO] - + Disabling gradients -[2023-09-05 20:50:19,632][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-05 20:50:20,501][pytorch][INFO] - + Turning on eval mode -[2023-09-05 20:50:20,501][inference][INFO] - Running inference benchmark -[2023-09-05 20:50:20,628][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 20:50:20,630][inference][INFO] - + Tracking forward pass peak 
memory -[2023-09-05 20:50:20,696][inference][INFO] - + Forward pass peak memory: 466.845696 (MB) -[2023-09-05 20:50:20,697][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 20:50:20,699][inference][INFO] - + Warming up the forward pass -[2023-09-05 20:50:20,737][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-05 20:50:25,789][inference][INFO] - + Forward pass latency: 3.19e-03 (s) -[2023-09-05 20:50:25,790][inference][INFO] - + Forward pass throughput: 313.00 (samples/s) -[2023-09-05 20:50:25,790][inference][INFO] - Saving inference results -[2023-09-05 20:50:25,801][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-05_19:21:33_4fa0aff21ee083d0197a898cdf17ff476fae2ac3/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-05_19:21:33_4fa0aff21ee083d0197a898cdf17ff476fae2ac3/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 344ec2f935a31a19a62e706f12f4bb8e09c4c386..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_19:21:33_4fa0aff21ee083d0197a898cdf17ff476fae2ac3/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_19:21:33_4fa0aff21ee083d0197a898cdf17ff476fae2ac3/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-05_19:21:33_4fa0aff21ee083d0197a898cdf17ff476fae2ac3/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 011831f0cb9c0c1063cd60cacbbf23e76b8f75fe..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_19:21:33_4fa0aff21ee083d0197a898cdf17ff476fae2ac3/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-05_19:21:33_4fa0aff21ee083d0197a898cdf17ff476fae2ac3/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-05_19:21:33_4fa0aff21ee083d0197a898cdf17ff476fae2ac3/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-09-05_19:21:33_4fa0aff21ee083d0197a898cdf17ff476fae2ac3/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_19:21:33_4fa0aff21ee083d0197a898cdf17ff476fae2ac3/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-05_19:21:33_4fa0aff21ee083d0197a898cdf17ff476fae2ac3/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-05_19:21:33_4fa0aff21ee083d0197a898cdf17ff476fae2ac3/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 9af0e66383a2862a3d036ea4917e3bacabb881ae..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_19:21:33_4fa0aff21ee083d0197a898cdf17ff476fae2ac3/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_19:21:33_4fa0aff21ee083d0197a898cdf17ff476fae2ac3/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-05_19:21:33_4fa0aff21ee083d0197a898cdf17ff476fae2ac3/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 181706bd1337cb6bb83a9277764dd962fc10535d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_19:21:33_4fa0aff21ee083d0197a898cdf17ff476fae2ac3/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.894272,0.00354,1130.0 diff --git a/raw_results/2023-09-05_19:21:33_4fa0aff21ee083d0197a898cdf17ff476fae2ac3/pytorch_bert_inference/1/main.log b/raw_results/2023-09-05_19:21:33_4fa0aff21ee083d0197a898cdf17ff476fae2ac3/pytorch_bert_inference/1/main.log deleted file mode 100644 index e4e2421e882a13b319829598e5dd3904d87d0118..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-09-05_19:21:33_4fa0aff21ee083d0197a898cdf17ff476fae2ac3/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-05 20:50:26,165][benchmark][INFO] - Configuring inference benchmark -[2023-09-05 20:50:26,165][benchmark][INFO] - + Setting seed(42) -[2023-09-05 20:50:26,607][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-05 20:50:26,607][backend][INFO] - Configuring pytorch backend -[2023-09-05 20:50:26,607][backend][INFO] - + Checking initial device isolation -[2023-09-05 20:50:26,608][backend][INFO] - + Checking contineous device isolation -[2023-09-05 20:50:26,608][pytorch][INFO] - + Disabling gradients -[2023-09-05 20:50:26,608][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-05 20:50:26,721][pytorch][INFO] - + Turning on eval mode -[2023-09-05 20:50:26,722][inference][INFO] - Running inference benchmark -[2023-09-05 20:50:26,842][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 20:50:26,843][inference][INFO] - + Tracking forward pass peak memory -[2023-09-05 20:50:26,882][inference][INFO] - + Forward pass peak memory: 467.894272 (MB) -[2023-09-05 20:50:26,883][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 20:50:26,885][inference][INFO] - + Warming up the forward pass -[2023-09-05 20:50:26,920][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-05 20:50:31,966][inference][INFO] - + Forward pass latency: 3.54e-03 (s) -[2023-09-05 20:50:31,968][inference][INFO] - + Forward pass throughput: 1130.00 (samples/s) -[2023-09-05 20:50:31,968][inference][INFO] - Saving inference results -[2023-09-05 20:50:31,975][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-05_19:21:33_4fa0aff21ee083d0197a898cdf17ff476fae2ac3/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-05_19:21:33_4fa0aff21ee083d0197a898cdf17ff476fae2ac3/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 7e32a35f367f532a16d65c94e459c4f30c9188c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_19:21:33_4fa0aff21ee083d0197a898cdf17ff476fae2ac3/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: 
hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_19:21:33_4fa0aff21ee083d0197a898cdf17ff476fae2ac3/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-05_19:21:33_4fa0aff21ee083d0197a898cdf17ff476fae2ac3/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 2a030dbe72ceeb223d82c93d292fc9630d357112..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_19:21:33_4fa0aff21ee083d0197a898cdf17ff476fae2ac3/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-05_19:21:33_4fa0aff21ee083d0197a898cdf17ff476fae2ac3/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-05_19:21:33_4fa0aff21ee083d0197a898cdf17ff476fae2ac3/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-09-05_19:21:33_4fa0aff21ee083d0197a898cdf17ff476fae2ac3/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_19:21:33_4fa0aff21ee083d0197a898cdf17ff476fae2ac3/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-05_19:21:33_4fa0aff21ee083d0197a898cdf17ff476fae2ac3/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-05_19:21:33_4fa0aff21ee083d0197a898cdf17ff476fae2ac3/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 5cf5f1ff54ffd6523449b409066f7697ce8772db..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_19:21:33_4fa0aff21ee083d0197a898cdf17ff476fae2ac3/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_19:21:33_4fa0aff21ee083d0197a898cdf17ff476fae2ac3/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-05_19:21:33_4fa0aff21ee083d0197a898cdf17ff476fae2ac3/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 32812427271f7be7e74f3461571dc2d5cef10b40..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_19:21:33_4fa0aff21ee083d0197a898cdf17ff476fae2ac3/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.348352,0.00324,309.0,0.485,206.0 diff --git a/raw_results/2023-09-05_19:21:33_4fa0aff21ee083d0197a898cdf17ff476fae2ac3/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-05_19:21:33_4fa0aff21ee083d0197a898cdf17ff476fae2ac3/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index aa610ab4240bcd56cc7b18572bb6f42cd390e349..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_19:21:33_4fa0aff21ee083d0197a898cdf17ff476fae2ac3/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-05 20:50:36,844][benchmark][INFO] - Configuring inference benchmark -[2023-09-05 20:50:36,845][benchmark][INFO] - + Setting seed(42) -[2023-09-05 20:50:38,259][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-05 20:50:38,260][backend][INFO] - Configuring pytorch backend -[2023-09-05 20:50:38,260][backend][INFO] - + Checking initial device isolation -[2023-09-05 20:50:38,260][backend][INFO] - + Checking contineous device isolation -[2023-09-05 20:50:38,261][pytorch][INFO] - + Disabling gradients -[2023-09-05 20:50:38,261][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-05 20:50:38,953][pytorch][INFO] - + Turning on eval mode -[2023-09-05 20:50:38,953][inference][INFO] - Running inference benchmark -[2023-09-05 20:50:39,156][inference][INFO] - + Tracking forward pass peak memory -[2023-09-05 20:50:39,202][inference][INFO] - + Forward pass peak memory: 469.348352 (MB) -[2023-09-05 20:50:39,203][inference][INFO] - + Warming up the forward pass 
-[2023-09-05 20:50:39,239][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-05 20:50:44,291][inference][INFO] - + Forward pass latency: 3.24e-03 (s) -[2023-09-05 20:50:44,293][inference][INFO] - + Forward pass throughput: 309.00 (samples/s) -[2023-09-05 20:50:44,294][inference][INFO] - + Warming up the generation pass -[2023-09-05 20:50:44,788][inference][INFO] - + Tracking generation latency and throughput -[2023-09-05 20:50:50,125][inference][INFO] - + Generation pass latency: 4.85e-01 (s) -[2023-09-05 20:50:50,126][inference][INFO] - + Generation pass throughput: 206.00 (tokens/s) -[2023-09-05 20:50:50,126][inference][INFO] - Saving inference results -[2023-09-05 20:50:50,139][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-05_19:50:14_b8def689346c45958268ec389ee6242bddc6d78c/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-05_19:50:14_b8def689346c45958268ec389ee6242bddc6d78c/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index d75847bed7772db4647c7b58e851f1868b48e619..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_19:50:14_b8def689346c45958268ec389ee6242bddc6d78c/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_19:50:14_b8def689346c45958268ec389ee6242bddc6d78c/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-05_19:50:14_b8def689346c45958268ec389ee6242bddc6d78c/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 9cf9910538771585a0c209175356787eeaa58379..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_19:50:14_b8def689346c45958268ec389ee6242bddc6d78c/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-05_19:50:14_b8def689346c45958268ec389ee6242bddc6d78c/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-09-05_19:50:14_b8def689346c45958268ec389ee6242bddc6d78c/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-05_19:50:14_b8def689346c45958268ec389ee6242bddc6d78c/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_19:50:14_b8def689346c45958268ec389ee6242bddc6d78c/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-05_19:50:14_b8def689346c45958268ec389ee6242bddc6d78c/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-05_19:50:14_b8def689346c45958268ec389ee6242bddc6d78c/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 36a53864c4e26f9e38905bad2498ae02dc1b767f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_19:50:14_b8def689346c45958268ec389ee6242bddc6d78c/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_19:50:14_b8def689346c45958268ec389ee6242bddc6d78c/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-05_19:50:14_b8def689346c45958268ec389ee6242bddc6d78c/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 7a7cb88ad0553f4ba713d78c92d1d2359747657b..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_19:50:14_b8def689346c45958268ec389ee6242bddc6d78c/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.51392,0.00384,260.0 diff --git a/raw_results/2023-09-05_19:50:14_b8def689346c45958268ec389ee6242bddc6d78c/pytorch_bert_inference/0/main.log b/raw_results/2023-09-05_19:50:14_b8def689346c45958268ec389ee6242bddc6d78c/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
0b29deb6d52634927adb5c76e8b8082165f8f583..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_19:50:14_b8def689346c45958268ec389ee6242bddc6d78c/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-05 20:52:01,389][benchmark][INFO] - Configuring inference benchmark -[2023-09-05 20:52:01,390][benchmark][INFO] - + Setting seed(42) -[2023-09-05 20:52:02,727][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-05 20:52:02,727][backend][INFO] - Configuring pytorch backend -[2023-09-05 20:52:02,727][backend][INFO] - + Checking initial device isolation -[2023-09-05 20:52:02,728][backend][INFO] - + Checking contineous device isolation -[2023-09-05 20:52:02,728][pytorch][INFO] - + Disabling gradients -[2023-09-05 20:52:02,728][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-05 20:52:03,345][pytorch][INFO] - + Turning on eval mode -[2023-09-05 20:52:03,346][inference][INFO] - Running inference benchmark -[2023-09-05 20:52:03,475][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 20:52:03,476][inference][INFO] - + Tracking forward pass peak memory -[2023-09-05 20:52:03,534][inference][INFO] - + Forward pass peak memory: 466.51392 (MB) -[2023-09-05 20:52:03,535][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 20:52:03,537][inference][INFO] - + Warming up the forward pass -[2023-09-05 20:52:03,575][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-05 20:52:08,623][inference][INFO] - + Forward pass latency: 3.84e-03 (s) -[2023-09-05 20:52:08,625][inference][INFO] - + Forward pass throughput: 260.00 (samples/s) -[2023-09-05 20:52:08,625][inference][INFO] - Saving inference results -[2023-09-05 20:52:08,636][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-05_19:50:14_b8def689346c45958268ec389ee6242bddc6d78c/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-05_19:50:14_b8def689346c45958268ec389ee6242bddc6d78c/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 344ec2f935a31a19a62e706f12f4bb8e09c4c386..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_19:50:14_b8def689346c45958268ec389ee6242bddc6d78c/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_19:50:14_b8def689346c45958268ec389ee6242bddc6d78c/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-05_19:50:14_b8def689346c45958268ec389ee6242bddc6d78c/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index da94c23185b6fa580f810ed05651412a5ca2c0dc..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_19:50:14_b8def689346c45958268ec389ee6242bddc6d78c/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-05_19:50:14_b8def689346c45958268ec389ee6242bddc6d78c/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-05_19:50:14_b8def689346c45958268ec389ee6242bddc6d78c/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-09-05_19:50:14_b8def689346c45958268ec389ee6242bddc6d78c/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_19:50:14_b8def689346c45958268ec389ee6242bddc6d78c/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-05_19:50:14_b8def689346c45958268ec389ee6242bddc6d78c/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-05_19:50:14_b8def689346c45958268ec389ee6242bddc6d78c/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 9af0e66383a2862a3d036ea4917e3bacabb881ae..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_19:50:14_b8def689346c45958268ec389ee6242bddc6d78c/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_19:50:14_b8def689346c45958268ec389ee6242bddc6d78c/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-05_19:50:14_b8def689346c45958268ec389ee6242bddc6d78c/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index eb5a9ad2f4171e6d478dd92a601595a8af9da814..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_19:50:14_b8def689346c45958268ec389ee6242bddc6d78c/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.44780799999995,0.00429,932.0 diff --git a/raw_results/2023-09-05_19:50:14_b8def689346c45958268ec389ee6242bddc6d78c/pytorch_bert_inference/1/main.log b/raw_results/2023-09-05_19:50:14_b8def689346c45958268ec389ee6242bddc6d78c/pytorch_bert_inference/1/main.log deleted file mode 100644 index aab78a0d3e4ace3ee430c15ca3bad985d8f88d94..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_19:50:14_b8def689346c45958268ec389ee6242bddc6d78c/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-05 20:52:09,010][benchmark][INFO] - Configuring inference benchmark -[2023-09-05 20:52:09,012][benchmark][INFO] - + Setting seed(42) -[2023-09-05 20:52:09,448][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-05 20:52:09,448][backend][INFO] - Configuring pytorch backend -[2023-09-05 20:52:09,449][backend][INFO] - + Checking initial device isolation -[2023-09-05 20:52:09,449][backend][INFO] - + Checking contineous device isolation -[2023-09-05 20:52:09,449][pytorch][INFO] - + Disabling gradients -[2023-09-05 20:52:09,449][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-05 20:52:09,572][pytorch][INFO] - + Turning on eval mode -[2023-09-05 20:52:09,572][inference][INFO] - Running inference benchmark -[2023-09-05 20:52:09,695][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 20:52:09,697][inference][INFO] - + Tracking forward 
pass peak memory -[2023-09-05 20:52:09,738][inference][INFO] - + Forward pass peak memory: 467.44780799999995 (MB) -[2023-09-05 20:52:09,739][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 20:52:09,741][inference][INFO] - + Warming up the forward pass -[2023-09-05 20:52:09,785][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-05 20:52:14,824][inference][INFO] - + Forward pass latency: 4.29e-03 (s) -[2023-09-05 20:52:14,826][inference][INFO] - + Forward pass throughput: 932.00 (samples/s) -[2023-09-05 20:52:14,826][inference][INFO] - Saving inference results -[2023-09-05 20:52:14,835][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-05_19:50:14_b8def689346c45958268ec389ee6242bddc6d78c/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-05_19:50:14_b8def689346c45958268ec389ee6242bddc6d78c/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 7e32a35f367f532a16d65c94e459c4f30c9188c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_19:50:14_b8def689346c45958268ec389ee6242bddc6d78c/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_19:50:14_b8def689346c45958268ec389ee6242bddc6d78c/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-05_19:50:14_b8def689346c45958268ec389ee6242bddc6d78c/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 6b86a7319e58b4796fb4236063690dad9e7cc5c4..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_19:50:14_b8def689346c45958268ec389ee6242bddc6d78c/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - 
launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-05_19:50:14_b8def689346c45958268ec389ee6242bddc6d78c/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-05_19:50:14_b8def689346c45958268ec389ee6242bddc6d78c/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-09-05_19:50:14_b8def689346c45958268ec389ee6242bddc6d78c/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_19:50:14_b8def689346c45958268ec389ee6242bddc6d78c/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-05_19:50:14_b8def689346c45958268ec389ee6242bddc6d78c/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-05_19:50:14_b8def689346c45958268ec389ee6242bddc6d78c/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 5cf5f1ff54ffd6523449b409066f7697ce8772db..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_19:50:14_b8def689346c45958268ec389ee6242bddc6d78c/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_19:50:14_b8def689346c45958268ec389ee6242bddc6d78c/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-05_19:50:14_b8def689346c45958268ec389ee6242bddc6d78c/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index e6161767f844df6bab8b2f3e34f1cc1770053cbc..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_19:50:14_b8def689346c45958268ec389ee6242bddc6d78c/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.21727999999996,0.00314,318.0,0.485,206.0 diff --git a/raw_results/2023-09-05_19:50:14_b8def689346c45958268ec389ee6242bddc6d78c/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-05_19:50:14_b8def689346c45958268ec389ee6242bddc6d78c/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 7be4a46aea9aabfc10f442e9727bef7eae0ab371..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-09-05_19:50:14_b8def689346c45958268ec389ee6242bddc6d78c/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-05 20:52:19,653][benchmark][INFO] - Configuring inference benchmark -[2023-09-05 20:52:19,654][benchmark][INFO] - + Setting seed(42) -[2023-09-05 20:52:21,345][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-05 20:52:21,345][backend][INFO] - Configuring pytorch backend -[2023-09-05 20:52:21,345][backend][INFO] - + Checking initial device isolation -[2023-09-05 20:52:21,346][backend][INFO] - + Checking contineous device isolation -[2023-09-05 20:52:21,346][pytorch][INFO] - + Disabling gradients -[2023-09-05 20:52:21,346][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-05 20:52:21,988][pytorch][INFO] - + Turning on eval mode -[2023-09-05 20:52:21,989][inference][INFO] - Running inference benchmark -[2023-09-05 20:52:22,183][inference][INFO] - + Tracking forward pass peak memory -[2023-09-05 20:52:22,226][inference][INFO] - + Forward pass peak memory: 469.21727999999996 (MB) -[2023-09-05 20:52:22,228][inference][INFO] - + Warming up the forward pass -[2023-09-05 20:52:22,259][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-05 20:52:27,310][inference][INFO] - + Forward pass latency: 3.14e-03 (s) -[2023-09-05 20:52:27,313][inference][INFO] - + Forward pass throughput: 318.00 (samples/s) -[2023-09-05 20:52:27,314][inference][INFO] - + Warming up the generation pass -[2023-09-05 20:52:27,805][inference][INFO] - + Tracking generation latency and throughput -[2023-09-05 20:52:33,145][inference][INFO] - + Generation pass latency: 4.85e-01 (s) -[2023-09-05 20:52:33,146][inference][INFO] - + Generation pass throughput: 206.00 (tokens/s) -[2023-09-05 20:52:33,146][inference][INFO] - Saving inference results -[2023-09-05 20:52:33,159][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-05_20:47:48_172f42c512e1bf32554ef910fe82f07916b4d4af/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-05_20:47:48_172f42c512e1bf32554ef910fe82f07916b4d4af/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index d75847bed7772db4647c7b58e851f1868b48e619..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_20:47:48_172f42c512e1bf32554ef910fe82f07916b4d4af/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_20:47:48_172f42c512e1bf32554ef910fe82f07916b4d4af/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-05_20:47:48_172f42c512e1bf32554ef910fe82f07916b4d4af/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index a1dddac3cacd589cb27bd5b8047c4a88b1a4c2f6..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_20:47:48_172f42c512e1bf32554ef910fe82f07916b4d4af/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-05_20:47:48_172f42c512e1bf32554ef910fe82f07916b4d4af/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-05_20:47:48_172f42c512e1bf32554ef910fe82f07916b4d4af/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-05_20:47:48_172f42c512e1bf32554ef910fe82f07916b4d4af/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_20:47:48_172f42c512e1bf32554ef910fe82f07916b4d4af/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-05_20:47:48_172f42c512e1bf32554ef910fe82f07916b4d4af/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-05_20:47:48_172f42c512e1bf32554ef910fe82f07916b4d4af/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 36a53864c4e26f9e38905bad2498ae02dc1b767f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_20:47:48_172f42c512e1bf32554ef910fe82f07916b4d4af/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_20:47:48_172f42c512e1bf32554ef910fe82f07916b4d4af/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-05_20:47:48_172f42c512e1bf32554ef910fe82f07916b4d4af/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index f7d117b6bad6f76989d7742aa29ef22d0bae449f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_20:47:48_172f42c512e1bf32554ef910fe82f07916b4d4af/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.247104,0.00353,283.0 diff --git a/raw_results/2023-09-05_20:47:48_172f42c512e1bf32554ef910fe82f07916b4d4af/pytorch_bert_inference/0/main.log b/raw_results/2023-09-05_20:47:48_172f42c512e1bf32554ef910fe82f07916b4d4af/pytorch_bert_inference/0/main.log deleted file mode 100644 index 0b4b2a91abffcfac79d036b3e5caa2b2de0c85e8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_20:47:48_172f42c512e1bf32554ef910fe82f07916b4d4af/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-05 20:53:41,838][benchmark][INFO] - Configuring inference benchmark -[2023-09-05 20:53:41,839][benchmark][INFO] - + Setting seed(42) -[2023-09-05 20:53:43,083][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-05 20:53:43,083][backend][INFO] - Configuring pytorch backend -[2023-09-05 20:53:43,083][backend][INFO] - + Checking initial device isolation -[2023-09-05 20:53:43,084][backend][INFO] - + Checking contineous device isolation -[2023-09-05 20:53:43,084][pytorch][INFO] - + Disabling gradients -[2023-09-05 20:53:43,084][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-05 20:53:43,692][pytorch][INFO] - + Turning on eval mode -[2023-09-05 20:53:43,693][inference][INFO] - Running inference benchmark -[2023-09-05 20:53:43,816][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 20:53:43,818][inference][INFO] - + Tracking forward pass peak 
memory -[2023-09-05 20:53:43,873][inference][INFO] - + Forward pass peak memory: 467.247104 (MB) -[2023-09-05 20:53:43,874][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 20:53:43,876][inference][INFO] - + Warming up the forward pass -[2023-09-05 20:53:43,907][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-05 20:53:48,957][inference][INFO] - + Forward pass latency: 3.53e-03 (s) -[2023-09-05 20:53:48,958][inference][INFO] - + Forward pass throughput: 283.00 (samples/s) -[2023-09-05 20:53:48,959][inference][INFO] - Saving inference results -[2023-09-05 20:53:48,969][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-05_20:47:48_172f42c512e1bf32554ef910fe82f07916b4d4af/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-05_20:47:48_172f42c512e1bf32554ef910fe82f07916b4d4af/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 344ec2f935a31a19a62e706f12f4bb8e09c4c386..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_20:47:48_172f42c512e1bf32554ef910fe82f07916b4d4af/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_20:47:48_172f42c512e1bf32554ef910fe82f07916b4d4af/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-05_20:47:48_172f42c512e1bf32554ef910fe82f07916b4d4af/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 9bf2ac9b9d67754f4b88bc772f97e21a8fa263f4..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_20:47:48_172f42c512e1bf32554ef910fe82f07916b4d4af/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-05_20:47:48_172f42c512e1bf32554ef910fe82f07916b4d4af/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-05_20:47:48_172f42c512e1bf32554ef910fe82f07916b4d4af/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-09-05_20:47:48_172f42c512e1bf32554ef910fe82f07916b4d4af/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_20:47:48_172f42c512e1bf32554ef910fe82f07916b4d4af/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-05_20:47:48_172f42c512e1bf32554ef910fe82f07916b4d4af/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-05_20:47:48_172f42c512e1bf32554ef910fe82f07916b4d4af/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 9af0e66383a2862a3d036ea4917e3bacabb881ae..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_20:47:48_172f42c512e1bf32554ef910fe82f07916b4d4af/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_20:47:48_172f42c512e1bf32554ef910fe82f07916b4d4af/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-05_20:47:48_172f42c512e1bf32554ef910fe82f07916b4d4af/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index a0d2fe3e4851058c7ddb8f38bf3b09e73bf072db..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_20:47:48_172f42c512e1bf32554ef910fe82f07916b4d4af/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.23424,0.00415,964.0 diff --git a/raw_results/2023-09-05_20:47:48_172f42c512e1bf32554ef910fe82f07916b4d4af/pytorch_bert_inference/1/main.log b/raw_results/2023-09-05_20:47:48_172f42c512e1bf32554ef910fe82f07916b4d4af/pytorch_bert_inference/1/main.log deleted file mode 100644 index fe31776b6e80a6eb54c74228f84f7b2a3726f68f..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-09-05_20:47:48_172f42c512e1bf32554ef910fe82f07916b4d4af/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-05 20:53:49,347][benchmark][INFO] - Configuring inference benchmark -[2023-09-05 20:53:49,348][benchmark][INFO] - + Setting seed(42) -[2023-09-05 20:53:49,808][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-05 20:53:49,809][backend][INFO] - Configuring pytorch backend -[2023-09-05 20:53:49,809][backend][INFO] - + Checking initial device isolation -[2023-09-05 20:53:49,809][backend][INFO] - + Checking contineous device isolation -[2023-09-05 20:53:49,809][pytorch][INFO] - + Disabling gradients -[2023-09-05 20:53:49,809][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-05 20:53:49,932][pytorch][INFO] - + Turning on eval mode -[2023-09-05 20:53:49,933][inference][INFO] - Running inference benchmark -[2023-09-05 20:53:50,056][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 20:53:50,057][inference][INFO] - + Tracking forward pass peak memory -[2023-09-05 20:53:50,099][inference][INFO] - + Forward pass peak memory: 468.23424 (MB) -[2023-09-05 20:53:50,100][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-05 20:53:50,102][inference][INFO] - + Warming up the forward pass -[2023-09-05 20:53:50,144][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-05 20:53:55,185][inference][INFO] - + Forward pass latency: 4.15e-03 (s) -[2023-09-05 20:53:55,186][inference][INFO] - + Forward pass throughput: 964.00 (samples/s) -[2023-09-05 20:53:55,186][inference][INFO] - Saving inference results -[2023-09-05 20:53:55,194][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-05_20:47:48_172f42c512e1bf32554ef910fe82f07916b4d4af/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-05_20:47:48_172f42c512e1bf32554ef910fe82f07916b4d4af/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 7e32a35f367f532a16d65c94e459c4f30c9188c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_20:47:48_172f42c512e1bf32554ef910fe82f07916b4d4af/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: 
hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_20:47:48_172f42c512e1bf32554ef910fe82f07916b4d4af/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-05_20:47:48_172f42c512e1bf32554ef910fe82f07916b4d4af/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 60feb4acfb709b57d9266add4d5f9e33ba3b8be8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_20:47:48_172f42c512e1bf32554ef910fe82f07916b4d4af/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
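The hydra.yaml snapshots in these runs record exactly how each job was composed: the config name (bert_cpu_inference or gpt2_cpu_inference), the MULTIRUN overrides, and the sweep output directory. A minimal sketch of recomposing one job's config offline with Hydra's compose API follows; it assumes the configs/ directory that the runtime section lists as a config source, which is not itself part of this diff:

from hydra import initialize, compose

# Recompose job 1 of the recorded bert_cpu_inference sweep
# (benchmark.input_shapes.batch_size: 1,4 expanded by the basic sweeper).
with initialize(config_path="configs", version_base="1.3"):
    cfg = compose(
        config_name="bert_cpu_inference",
        overrides=["benchmark.input_shapes.batch_size=4"],
    )
print(cfg.benchmark.input_shapes.batch_size)  # 4
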
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-05_20:47:48_172f42c512e1bf32554ef910fe82f07916b4d4af/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-05_20:47:48_172f42c512e1bf32554ef910fe82f07916b4d4af/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-09-05_20:47:48_172f42c512e1bf32554ef910fe82f07916b4d4af/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_20:47:48_172f42c512e1bf32554ef910fe82f07916b4d4af/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-05_20:47:48_172f42c512e1bf32554ef910fe82f07916b4d4af/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-05_20:47:48_172f42c512e1bf32554ef910fe82f07916b4d4af/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 5cf5f1ff54ffd6523449b409066f7697ce8772db..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_20:47:48_172f42c512e1bf32554ef910fe82f07916b4d4af/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-05_20:47:48_172f42c512e1bf32554ef910fe82f07916b4d4af/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-05_20:47:48_172f42c512e1bf32554ef910fe82f07916b4d4af/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 392e979166a8823bee670537db3271af916b5a89..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_20:47:48_172f42c512e1bf32554ef910fe82f07916b4d4af/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.42208,0.00394,254.0,0.532,188.0 diff --git a/raw_results/2023-09-05_20:47:48_172f42c512e1bf32554ef910fe82f07916b4d4af/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-05_20:47:48_172f42c512e1bf32554ef910fe82f07916b4d4af/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index fb2cf53bad32896eb849e0be0827c71b18471646..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-05_20:47:48_172f42c512e1bf32554ef910fe82f07916b4d4af/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-05 20:54:00,231][benchmark][INFO] - Configuring inference benchmark -[2023-09-05 20:54:00,232][benchmark][INFO] - + Setting seed(42) -[2023-09-05 20:54:01,708][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-05 20:54:01,709][backend][INFO] - Configuring pytorch backend -[2023-09-05 20:54:01,709][backend][INFO] - + Checking initial device isolation -[2023-09-05 20:54:01,709][backend][INFO] - + Checking contineous device isolation -[2023-09-05 20:54:01,709][pytorch][INFO] - + Disabling gradients -[2023-09-05 20:54:01,709][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-05 20:54:02,373][pytorch][INFO] - + Turning on eval mode -[2023-09-05 20:54:02,374][inference][INFO] - Running inference benchmark -[2023-09-05 20:54:02,572][inference][INFO] - + Tracking forward pass peak memory -[2023-09-05 20:54:02,622][inference][INFO] - + Forward pass peak memory: 469.42208 (MB) -[2023-09-05 20:54:02,624][inference][INFO] - + Warming up the forward pass 
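The CSV rows and log lines recorded above are internally consistent, matching forward throughput = batch_size / forward latency and generation throughput = new_tokens * batch_size / generation latency. A quick sanity check, a sketch using only values copied from the inference_results.csv rows in this diff rather than any benchmark code:

# (batch_size, forward latency in s, reported throughput in samples/s)
runs = [
    (1, 0.00353, 283.0),  # pytorch_bert_inference/0
    (4, 0.00415, 964.0),  # pytorch_bert_inference/1
    (1, 0.00394, 254.0),  # pytorch_gpt2_inference/0
]
for batch_size, latency, reported in runs:
    assert round(batch_size / latency) == reported

# Generation: 100 new tokens at batch_size 1 over 0.532 s -> 188 tokens/s.
assert round(100 * 1 / 0.532) == 188
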
-[2023-09-05 20:54:02,667][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-05 20:54:07,710][inference][INFO] - + Forward pass latency: 3.94e-03 (s) -[2023-09-05 20:54:07,711][inference][INFO] - + Forward pass throughput: 254.00 (samples/s) -[2023-09-05 20:54:07,711][inference][INFO] - + Warming up the generation pass -[2023-09-05 20:54:08,311][inference][INFO] - + Tracking generation latency and throughput -[2023-09-05 20:54:13,635][inference][INFO] - + Generation pass latency: 5.32e-01 (s) -[2023-09-05 20:54:13,636][inference][INFO] - + Generation pass throughput: 188.00 (tokens/s) -[2023-09-05 20:54:13,636][inference][INFO] - Saving inference results -[2023-09-05 20:54:13,649][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-06_09:55:11_f6295c6c535c2b036a4533327ab5a92c6b199b78/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-06_09:55:11_f6295c6c535c2b036a4533327ab5a92c6b199b78/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index d75847bed7772db4647c7b58e851f1868b48e619..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_09:55:11_f6295c6c535c2b036a4533327ab5a92c6b199b78/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-06_09:55:11_f6295c6c535c2b036a4533327ab5a92c6b199b78/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-06_09:55:11_f6295c6c535c2b036a4533327ab5a92c6b199b78/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 889b873f52923eac2d2bf36d971d713c961e0c32..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_09:55:11_f6295c6c535c2b036a4533327ab5a92c6b199b78/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-06_09:55:11_f6295c6c535c2b036a4533327ab5a92c6b199b78/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-09-06_09:55:11_f6295c6c535c2b036a4533327ab5a92c6b199b78/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-06_09:55:11_f6295c6c535c2b036a4533327ab5a92c6b199b78/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_09:55:11_f6295c6c535c2b036a4533327ab5a92c6b199b78/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-06_09:55:11_f6295c6c535c2b036a4533327ab5a92c6b199b78/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-06_09:55:11_f6295c6c535c2b036a4533327ab5a92c6b199b78/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 36a53864c4e26f9e38905bad2498ae02dc1b767f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_09:55:11_f6295c6c535c2b036a4533327ab5a92c6b199b78/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-06_09:55:11_f6295c6c535c2b036a4533327ab5a92c6b199b78/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-06_09:55:11_f6295c6c535c2b036a4533327ab5a92c6b199b78/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 32a6d3acaf25cadf041c537dec0dbe8b4b788d0a..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_09:55:11_f6295c6c535c2b036a4533327ab5a92c6b199b78/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.460672,0.00349,287.0 diff --git a/raw_results/2023-09-06_09:55:11_f6295c6c535c2b036a4533327ab5a92c6b199b78/pytorch_bert_inference/0/main.log b/raw_results/2023-09-06_09:55:11_f6295c6c535c2b036a4533327ab5a92c6b199b78/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
43313d37ed2a739a610a238d8e111aa7b49c368f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_09:55:11_f6295c6c535c2b036a4533327ab5a92c6b199b78/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-06 10:49:53,360][benchmark][INFO] - Configuring inference benchmark -[2023-09-06 10:49:53,362][benchmark][INFO] - + Setting seed(42) -[2023-09-06 10:49:54,625][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-06 10:49:54,626][backend][INFO] - Configuring pytorch backend -[2023-09-06 10:49:54,626][backend][INFO] - + Checking initial device isolation -[2023-09-06 10:49:54,626][backend][INFO] - + Checking contineous device isolation -[2023-09-06 10:49:54,626][pytorch][INFO] - + Disabling gradients -[2023-09-06 10:49:54,627][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-06 10:49:55,250][pytorch][INFO] - + Turning on eval mode -[2023-09-06 10:49:55,251][inference][INFO] - Running inference benchmark -[2023-09-06 10:49:55,378][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-06 10:49:55,380][inference][INFO] - + Tracking forward pass peak memory -[2023-09-06 10:49:55,447][inference][INFO] - + Forward pass peak memory: 466.460672 (MB) -[2023-09-06 10:49:55,448][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-06 10:49:55,450][inference][INFO] - + Warming up the forward pass -[2023-09-06 10:49:55,487][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-06 10:50:00,533][inference][INFO] - + Forward pass latency: 3.49e-03 (s) -[2023-09-06 10:50:00,535][inference][INFO] - + Forward pass throughput: 287.00 (samples/s) -[2023-09-06 10:50:00,535][inference][INFO] - Saving inference results -[2023-09-06 10:50:00,545][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-06_09:55:11_f6295c6c535c2b036a4533327ab5a92c6b199b78/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-06_09:55:11_f6295c6c535c2b036a4533327ab5a92c6b199b78/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 344ec2f935a31a19a62e706f12f4bb8e09c4c386..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_09:55:11_f6295c6c535c2b036a4533327ab5a92c6b199b78/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-06_09:55:11_f6295c6c535c2b036a4533327ab5a92c6b199b78/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-06_09:55:11_f6295c6c535c2b036a4533327ab5a92c6b199b78/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index e0fcde8d8ef00ba1ca775da67736e312b1ed5938..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_09:55:11_f6295c6c535c2b036a4533327ab5a92c6b199b78/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
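With raw_results snapshots for two commits in place, a regression check reduces to diffing the per-run CSVs. A sketch using pandas, assuming only the raw_results/<commit_date>_<commit_sha>/<experiment>/<job>/ layout used throughout this diff:

import pandas as pd

OLD = "raw_results/2023-09-05_20:47:48_172f42c512e1bf32554ef910fe82f07916b4d4af"
NEW = "raw_results/2023-09-06_09:55:11_f6295c6c535c2b036a4533327ab5a92c6b199b78"
RUN = "pytorch_bert_inference/0/inference_results.csv"

old = pd.read_csv(f"{OLD}/{RUN}", index_col=0)
new = pd.read_csv(f"{NEW}/{RUN}", index_col=0)

# Report the relative change of every tracked metric for this run.
for col in old.columns:
    base, cur = old[col].iloc[0], new[col].iloc[0]
    print(f"{col}: {base} -> {cur} ({(cur - base) / base:+.1%})")
# e.g. forward.latency(s): 0.00353 -> 0.00349 (-1.1%)
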
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-06_09:55:11_f6295c6c535c2b036a4533327ab5a92c6b199b78/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-06_09:55:11_f6295c6c535c2b036a4533327ab5a92c6b199b78/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-09-06_09:55:11_f6295c6c535c2b036a4533327ab5a92c6b199b78/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_09:55:11_f6295c6c535c2b036a4533327ab5a92c6b199b78/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-06_09:55:11_f6295c6c535c2b036a4533327ab5a92c6b199b78/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-06_09:55:11_f6295c6c535c2b036a4533327ab5a92c6b199b78/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 9af0e66383a2862a3d036ea4917e3bacabb881ae..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_09:55:11_f6295c6c535c2b036a4533327ab5a92c6b199b78/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-06_09:55:11_f6295c6c535c2b036a4533327ab5a92c6b199b78/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-06_09:55:11_f6295c6c535c2b036a4533327ab5a92c6b199b78/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index ae07c683bb0bb8c78bc5407fc8e68ce1052db63a..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_09:55:11_f6295c6c535c2b036a4533327ab5a92c6b199b78/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.3536,0.0037,1080.0 diff --git a/raw_results/2023-09-06_09:55:11_f6295c6c535c2b036a4533327ab5a92c6b199b78/pytorch_bert_inference/1/main.log b/raw_results/2023-09-06_09:55:11_f6295c6c535c2b036a4533327ab5a92c6b199b78/pytorch_bert_inference/1/main.log deleted file mode 100644 index 34545f3e30bddaf9c338ada2b4ce549d1c4e0fcd..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_09:55:11_f6295c6c535c2b036a4533327ab5a92c6b199b78/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-06 10:50:00,941][benchmark][INFO] - Configuring inference benchmark -[2023-09-06 10:50:00,942][benchmark][INFO] - + Setting seed(42) -[2023-09-06 10:50:01,435][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-06 10:50:01,436][backend][INFO] - Configuring pytorch backend -[2023-09-06 10:50:01,436][backend][INFO] - + Checking initial device isolation -[2023-09-06 10:50:01,436][backend][INFO] - + Checking contineous device isolation -[2023-09-06 10:50:01,436][pytorch][INFO] - + Disabling gradients -[2023-09-06 10:50:01,436][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-06 10:50:01,554][pytorch][INFO] - + Turning on eval mode -[2023-09-06 10:50:01,555][inference][INFO] - Running inference benchmark -[2023-09-06 10:50:01,677][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-06 10:50:01,678][inference][INFO] - + Tracking forward pass peak 
memory -[2023-09-06 10:50:01,723][inference][INFO] - + Forward pass peak memory: 467.3536 (MB) -[2023-09-06 10:50:01,725][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-06 10:50:01,727][inference][INFO] - + Warming up the forward pass -[2023-09-06 10:50:01,764][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-06 10:50:06,807][inference][INFO] - + Forward pass latency: 3.70e-03 (s) -[2023-09-06 10:50:06,809][inference][INFO] - + Forward pass throughput: 1080.00 (samples/s) -[2023-09-06 10:50:06,809][inference][INFO] - Saving inference results -[2023-09-06 10:50:06,817][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-06_09:55:11_f6295c6c535c2b036a4533327ab5a92c6b199b78/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-06_09:55:11_f6295c6c535c2b036a4533327ab5a92c6b199b78/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 7e32a35f367f532a16d65c94e459c4f30c9188c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_09:55:11_f6295c6c535c2b036a4533327ab5a92c6b199b78/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-06_09:55:11_f6295c6c535c2b036a4533327ab5a92c6b199b78/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-06_09:55:11_f6295c6c535c2b036a4533327ab5a92c6b199b78/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 4c105ef93ea317917eb0eff66b3dffb4900d0ba9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_09:55:11_f6295c6c535c2b036a4533327ab5a92c6b199b78/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-06_09:55:11_f6295c6c535c2b036a4533327ab5a92c6b199b78/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-06_09:55:11_f6295c6c535c2b036a4533327ab5a92c6b199b78/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-09-06_09:55:11_f6295c6c535c2b036a4533327ab5a92c6b199b78/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_09:55:11_f6295c6c535c2b036a4533327ab5a92c6b199b78/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-06_09:55:11_f6295c6c535c2b036a4533327ab5a92c6b199b78/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-06_09:55:11_f6295c6c535c2b036a4533327ab5a92c6b199b78/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 5cf5f1ff54ffd6523449b409066f7697ce8772db..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_09:55:11_f6295c6c535c2b036a4533327ab5a92c6b199b78/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-06_09:55:11_f6295c6c535c2b036a4533327ab5a92c6b199b78/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-06_09:55:11_f6295c6c535c2b036a4533327ab5a92c6b199b78/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 5ca458082c0ddaea1846101358500de14694b9c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_09:55:11_f6295c6c535c2b036a4533327ab5a92c6b199b78/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.38111999999995,0.0032,312.0,0.488,205.0 diff --git a/raw_results/2023-09-06_09:55:11_f6295c6c535c2b036a4533327ab5a92c6b199b78/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-06_09:55:11_f6295c6c535c2b036a4533327ab5a92c6b199b78/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 2ca1f9577fea86a1a88e3d86a5a8be564ea66f61..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-09-06_09:55:11_f6295c6c535c2b036a4533327ab5a92c6b199b78/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-06 10:50:11,650][benchmark][INFO] - Configuring inference benchmark -[2023-09-06 10:50:11,651][benchmark][INFO] - + Setting seed(42) -[2023-09-06 10:50:13,076][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-06 10:50:13,076][backend][INFO] - Configuring pytorch backend -[2023-09-06 10:50:13,076][backend][INFO] - + Checking initial device isolation -[2023-09-06 10:50:13,076][backend][INFO] - + Checking contineous device isolation -[2023-09-06 10:50:13,077][pytorch][INFO] - + Disabling gradients -[2023-09-06 10:50:13,077][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-06 10:50:13,735][pytorch][INFO] - + Turning on eval mode -[2023-09-06 10:50:13,736][inference][INFO] - Running inference benchmark -[2023-09-06 10:50:14,002][inference][INFO] - + Tracking forward pass peak memory -[2023-09-06 10:50:14,053][inference][INFO] - + Forward pass peak memory: 469.38111999999995 (MB) -[2023-09-06 10:50:14,054][inference][INFO] - + Warming up the forward pass -[2023-09-06 10:50:14,093][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-06 10:50:19,142][inference][INFO] - + Forward pass latency: 3.20e-03 (s) -[2023-09-06 10:50:19,144][inference][INFO] - + Forward pass throughput: 312.00 (samples/s) -[2023-09-06 10:50:19,145][inference][INFO] - + Warming up the generation pass -[2023-09-06 10:50:19,644][inference][INFO] - + Tracking generation latency and throughput -[2023-09-06 10:50:25,012][inference][INFO] - + Generation pass latency: 4.88e-01 (s) -[2023-09-06 10:50:25,014][inference][INFO] - + Generation pass throughput: 205.00 (tokens/s) -[2023-09-06 10:50:25,014][inference][INFO] - Saving inference results -[2023-09-06 10:50:25,026][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-06_11:21:00_f6301b9a13b8467d1f88a6f419d76aefa15bd9b8/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-06_11:21:00_f6301b9a13b8467d1f88a6f419d76aefa15bd9b8/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index d75847bed7772db4647c7b58e851f1868b48e619..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_11:21:00_f6301b9a13b8467d1f88a6f419d76aefa15bd9b8/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-06_11:21:00_f6301b9a13b8467d1f88a6f419d76aefa15bd9b8/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-06_11:21:00_f6301b9a13b8467d1f88a6f419d76aefa15bd9b8/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index fb450f4fc918b35bd294eed128aef26331ca7b50..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_11:21:00_f6301b9a13b8467d1f88a6f419d76aefa15bd9b8/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
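The config.yaml snapshot above stores OmegaConf interpolations such as ${is_inference:${benchmark.name}} verbatim; that resolver is registered by optimum_benchmark at runtime, so resolving these values outside the tool would fail unless the resolver is re-registered. A minimal sketch for inspecting a snapshot unresolved (the inspected keys are taken from the file above):

    # Sketch: inspect a saved config snapshot without resolving custom resolvers.
    from omegaconf import OmegaConf

    cfg = OmegaConf.load(
        "raw_results/2023-09-06_11:21:00_f6301b9a13b8467d1f88a6f419d76aefa15bd9b8"
        "/pytorch_bert_inference/0/.config/config.yaml"
    )
    raw = OmegaConf.to_container(cfg, resolve=False)  # keep ${...} strings as-is
    print(raw["benchmark"]["input_shapes"]["batch_size"])  # 1
    print(raw["backend"]["disable_grad"])  # '${is_inference:${benchmark.name}}'
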
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-06_11:21:00_f6301b9a13b8467d1f88a6f419d76aefa15bd9b8/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-06_11:21:00_f6301b9a13b8467d1f88a6f419d76aefa15bd9b8/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-06_11:21:00_f6301b9a13b8467d1f88a6f419d76aefa15bd9b8/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_11:21:00_f6301b9a13b8467d1f88a6f419d76aefa15bd9b8/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-06_11:21:00_f6301b9a13b8467d1f88a6f419d76aefa15bd9b8/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-06_11:21:00_f6301b9a13b8467d1f88a6f419d76aefa15bd9b8/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 36a53864c4e26f9e38905bad2498ae02dc1b767f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_11:21:00_f6301b9a13b8467d1f88a6f419d76aefa15bd9b8/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-06_11:21:00_f6301b9a13b8467d1f88a6f419d76aefa15bd9b8/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-06_11:21:00_f6301b9a13b8467d1f88a6f419d76aefa15bd9b8/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 5797f771a5f5fd1d927221717867586452eda817..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_11:21:00_f6301b9a13b8467d1f88a6f419d76aefa15bd9b8/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.66547199999997,0.0036,278.0 diff --git a/raw_results/2023-09-06_11:21:00_f6301b9a13b8467d1f88a6f419d76aefa15bd9b8/pytorch_bert_inference/0/main.log b/raw_results/2023-09-06_11:21:00_f6301b9a13b8467d1f88a6f419d76aefa15bd9b8/pytorch_bert_inference/0/main.log deleted file mode 100644 index e2dff9ae071f7c6de8b684c7d47b8df213e0a039..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_11:21:00_f6301b9a13b8467d1f88a6f419d76aefa15bd9b8/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-06 12:59:37,192][benchmark][INFO] - Configuring inference benchmark -[2023-09-06 12:59:37,193][benchmark][INFO] - + Setting seed(42) -[2023-09-06 12:59:38,456][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-06 12:59:38,457][backend][INFO] - Configuring pytorch backend -[2023-09-06 12:59:38,457][backend][INFO] - + Checking initial device isolation -[2023-09-06 12:59:38,457][backend][INFO] - + Checking contineous device isolation -[2023-09-06 12:59:38,457][pytorch][INFO] - + Disabling gradients -[2023-09-06 12:59:38,458][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-06 12:59:39,108][pytorch][INFO] - + Turning on eval mode -[2023-09-06 12:59:39,109][inference][INFO] - Running inference benchmark -[2023-09-06 12:59:39,230][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-06 12:59:39,231][inference][INFO] - + Tracking forward 
pass peak memory -[2023-09-06 12:59:39,293][inference][INFO] - + Forward pass peak memory: 466.66547199999997 (MB) -[2023-09-06 12:59:39,294][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-06 12:59:39,295][inference][INFO] - + Warming up the forward pass -[2023-09-06 12:59:39,327][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-06 12:59:44,375][inference][INFO] - + Forward pass latency: 3.60e-03 (s) -[2023-09-06 12:59:44,377][inference][INFO] - + Forward pass throughput: 278.00 (samples/s) -[2023-09-06 12:59:44,377][inference][INFO] - Saving inference results -[2023-09-06 12:59:44,390][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-06_11:21:00_f6301b9a13b8467d1f88a6f419d76aefa15bd9b8/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-06_11:21:00_f6301b9a13b8467d1f88a6f419d76aefa15bd9b8/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 344ec2f935a31a19a62e706f12f4bb8e09c4c386..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_11:21:00_f6301b9a13b8467d1f88a6f419d76aefa15bd9b8/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-06_11:21:00_f6301b9a13b8467d1f88a6f419d76aefa15bd9b8/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-06_11:21:00_f6301b9a13b8467d1f88a6f419d76aefa15bd9b8/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index aa5033394df921dba54482acda9ebf8c416b8eb1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_11:21:00_f6301b9a13b8467d1f88a6f419d76aefa15bd9b8/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} 
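The bert main.log above shows dummy inputs being generated for ['input_ids', 'attention_mask', 'token_type_ids'], with shapes driven by input_shapes (batch_size=1, sequence_length=16). A rough illustration of tensors with those names and shapes (the vocabulary bound of 1000 is an arbitrary placeholder; this is not optimum_benchmark's actual generator):

    # Illustrative dummy text-classification inputs matching the logged names.
    import torch

    batch_size, sequence_length = 1, 16
    dummy = {
        "input_ids": torch.randint(0, 1000, (batch_size, sequence_length)),
        "attention_mask": torch.ones(batch_size, sequence_length, dtype=torch.long),
        "token_type_ids": torch.zeros(batch_size, sequence_length, dtype=torch.long),
    }
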
- launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-06_11:21:00_f6301b9a13b8467d1f88a6f419d76aefa15bd9b8/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-09-06_11:21:00_f6301b9a13b8467d1f88a6f419d76aefa15bd9b8/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-09-06_11:21:00_f6301b9a13b8467d1f88a6f419d76aefa15bd9b8/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_11:21:00_f6301b9a13b8467d1f88a6f419d76aefa15bd9b8/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-06_11:21:00_f6301b9a13b8467d1f88a6f419d76aefa15bd9b8/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-06_11:21:00_f6301b9a13b8467d1f88a6f419d76aefa15bd9b8/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 9af0e66383a2862a3d036ea4917e3bacabb881ae..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_11:21:00_f6301b9a13b8467d1f88a6f419d76aefa15bd9b8/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-06_11:21:00_f6301b9a13b8467d1f88a6f419d76aefa15bd9b8/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-06_11:21:00_f6301b9a13b8467d1f88a6f419d76aefa15bd9b8/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index dc16292b9bc7063114eacfd796800b20c0e7c3d4..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_11:21:00_f6301b9a13b8467d1f88a6f419d76aefa15bd9b8/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.652608,0.00378,1060.0 diff --git a/raw_results/2023-09-06_11:21:00_f6301b9a13b8467d1f88a6f419d76aefa15bd9b8/pytorch_bert_inference/1/main.log b/raw_results/2023-09-06_11:21:00_f6301b9a13b8467d1f88a6f419d76aefa15bd9b8/pytorch_bert_inference/1/main.log deleted file mode 100644 index 
6163860cc907b1d224e4085fcb29584933de2ace..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_11:21:00_f6301b9a13b8467d1f88a6f419d76aefa15bd9b8/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-06 12:59:44,773][benchmark][INFO] - Configuring inference benchmark -[2023-09-06 12:59:44,774][benchmark][INFO] - + Setting seed(42) -[2023-09-06 12:59:45,219][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-06 12:59:45,219][backend][INFO] - Configuring pytorch backend -[2023-09-06 12:59:45,219][backend][INFO] - + Checking initial device isolation -[2023-09-06 12:59:45,220][backend][INFO] - + Checking contineous device isolation -[2023-09-06 12:59:45,220][pytorch][INFO] - + Disabling gradients -[2023-09-06 12:59:45,220][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-06 12:59:45,344][pytorch][INFO] - + Turning on eval mode -[2023-09-06 12:59:45,345][inference][INFO] - Running inference benchmark -[2023-09-06 12:59:45,476][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-06 12:59:45,477][inference][INFO] - + Tracking forward pass peak memory -[2023-09-06 12:59:45,521][inference][INFO] - + Forward pass peak memory: 467.652608 (MB) -[2023-09-06 12:59:45,522][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-06 12:59:45,524][inference][INFO] - + Warming up the forward pass -[2023-09-06 12:59:45,566][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-06 12:59:50,610][inference][INFO] - + Forward pass latency: 3.78e-03 (s) -[2023-09-06 12:59:50,612][inference][INFO] - + Forward pass throughput: 1060.00 (samples/s) -[2023-09-06 12:59:50,612][inference][INFO] - Saving inference results -[2023-09-06 12:59:50,619][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-06_11:21:00_f6301b9a13b8467d1f88a6f419d76aefa15bd9b8/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-06_11:21:00_f6301b9a13b8467d1f88a6f419d76aefa15bd9b8/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 7e32a35f367f532a16d65c94e459c4f30c9188c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_11:21:00_f6301b9a13b8467d1f88a6f419d76aefa15bd9b8/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-06_11:21:00_f6301b9a13b8467d1f88a6f419d76aefa15bd9b8/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-06_11:21:00_f6301b9a13b8467d1f88a6f419d76aefa15bd9b8/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index f0081f663c9c2a91e2faecba4732c1879574b991..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_11:21:00_f6301b9a13b8467d1f88a6f419d76aefa15bd9b8/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
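A pattern worth noting in the deleted result files: the reported forward throughput is consistent with batch_size divided by the mean forward latency, rounded to three significant figures. A quick check against the two bert rows recorded above:

    # Consistency check using values copied from inference_results.csv above.
    runs = [
        # (batch_size, forward.latency(s), reported forward.throughput(samples/s))
        (1, 0.0036, 278.0),    # pytorch_bert_inference/0
        (4, 0.00378, 1060.0),  # pytorch_bert_inference/1
    ]
    for bs, lat, reported in runs:
        print(f"bs={bs}: computed {bs / lat:.1f} vs reported {reported}")
    # bs=1: computed 277.8 vs reported 278.0
    # bs=4: computed 1058.2 vs reported 1060.0 (agrees at 3 significant figures)
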
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-06_11:21:00_f6301b9a13b8467d1f88a6f419d76aefa15bd9b8/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-06_11:21:00_f6301b9a13b8467d1f88a6f419d76aefa15bd9b8/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-09-06_11:21:00_f6301b9a13b8467d1f88a6f419d76aefa15bd9b8/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_11:21:00_f6301b9a13b8467d1f88a6f419d76aefa15bd9b8/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-06_11:21:00_f6301b9a13b8467d1f88a6f419d76aefa15bd9b8/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-06_11:21:00_f6301b9a13b8467d1f88a6f419d76aefa15bd9b8/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 5cf5f1ff54ffd6523449b409066f7697ce8772db..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_11:21:00_f6301b9a13b8467d1f88a6f419d76aefa15bd9b8/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-06_11:21:00_f6301b9a13b8467d1f88a6f419d76aefa15bd9b8/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-06_11:21:00_f6301b9a13b8467d1f88a6f419d76aefa15bd9b8/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index df742a7995bb1c4eeed712f63ad756a7d3fad2b4..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_11:21:00_f6301b9a13b8467d1f88a6f419d76aefa15bd9b8/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.417984,0.00381,262.0,0.49,204.0 diff --git a/raw_results/2023-09-06_11:21:00_f6301b9a13b8467d1f88a6f419d76aefa15bd9b8/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-06_11:21:00_f6301b9a13b8467d1f88a6f419d76aefa15bd9b8/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 0507ff17bbe49c9b86a0206e2deeb7741b86a40d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_11:21:00_f6301b9a13b8467d1f88a6f419d76aefa15bd9b8/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-06 12:59:55,629][benchmark][INFO] - Configuring inference benchmark -[2023-09-06 12:59:55,630][benchmark][INFO] - + Setting seed(42) -[2023-09-06 12:59:57,076][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-06 12:59:57,077][backend][INFO] - Configuring pytorch backend -[2023-09-06 12:59:57,077][backend][INFO] - + Checking initial device isolation -[2023-09-06 12:59:57,077][backend][INFO] - + Checking contineous device isolation -[2023-09-06 12:59:57,077][pytorch][INFO] - + Disabling gradients -[2023-09-06 12:59:57,077][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-06 12:59:57,731][pytorch][INFO] - + Turning on eval mode -[2023-09-06 12:59:57,732][inference][INFO] - Running inference benchmark -[2023-09-06 12:59:57,935][inference][INFO] - + Tracking forward pass peak memory -[2023-09-06 12:59:57,982][inference][INFO] - + Forward pass peak memory: 469.417984 (MB) -[2023-09-06 12:59:57,984][inference][INFO] - + Warming up the forward pass 
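The roughly five-second gap between the 'Tracking forward pass latency' entry below and the reported latency matches benchmark_duration: 5 in the config: forward passes appear to be repeated for a fixed wall-clock budget and averaged. A minimal sketch of such a duration-based loop (an illustration of the pattern, not optimum_benchmark's implementation; run_forward is a stand-in for the model call):

    # Minimal duration-based latency/throughput loop, as suggested by the logs.
    import time

    def measure(run_forward, benchmark_duration=5.0, batch_size=1):
        latencies = []
        start = time.perf_counter()
        while time.perf_counter() - start < benchmark_duration:
            t0 = time.perf_counter()
            run_forward()
            latencies.append(time.perf_counter() - t0)
        latency = sum(latencies) / len(latencies)  # mean latency (s)
        return latency, batch_size / latency       # (s, samples/s)

The same arithmetic covers generation: with new_tokens: 100 and the 4.90e-01 s generation latency logged below, 100 / 0.49 gives roughly 204 tokens/s, matching the recorded throughput.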
-[2023-09-06 12:59:58,021][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-06 13:00:03,065][inference][INFO] - + Forward pass latency: 3.81e-03 (s) -[2023-09-06 13:00:03,067][inference][INFO] - + Forward pass throughput: 262.00 (samples/s) -[2023-09-06 13:00:03,068][inference][INFO] - + Warming up the generation pass -[2023-09-06 13:00:03,568][inference][INFO] - + Tracking generation latency and throughput -[2023-09-06 13:00:08,957][inference][INFO] - + Generation pass latency: 4.90e-01 (s) -[2023-09-06 13:00:08,959][inference][INFO] - + Generation pass throughput: 204.00 (tokens/s) -[2023-09-06 13:00:08,959][inference][INFO] - Saving inference results -[2023-09-06 13:00:08,972][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-06_12:37:27_842e99f1b9ee2a0fa239997ef695c5ed0bd77195/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-06_12:37:27_842e99f1b9ee2a0fa239997ef695c5ed0bd77195/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index d75847bed7772db4647c7b58e851f1868b48e619..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_12:37:27_842e99f1b9ee2a0fa239997ef695c5ed0bd77195/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-06_12:37:27_842e99f1b9ee2a0fa239997ef695c5ed0bd77195/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-06_12:37:27_842e99f1b9ee2a0fa239997ef695c5ed0bd77195/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 49287f2e6db672ed7d1fa9bfe9064c5adf690ba7..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_12:37:27_842e99f1b9ee2a0fa239997ef695c5ed0bd77195/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-06_12:37:27_842e99f1b9ee2a0fa239997ef695c5ed0bd77195/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-09-06_12:37:27_842e99f1b9ee2a0fa239997ef695c5ed0bd77195/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-06_12:37:27_842e99f1b9ee2a0fa239997ef695c5ed0bd77195/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_12:37:27_842e99f1b9ee2a0fa239997ef695c5ed0bd77195/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-06_12:37:27_842e99f1b9ee2a0fa239997ef695c5ed0bd77195/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-06_12:37:27_842e99f1b9ee2a0fa239997ef695c5ed0bd77195/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 36a53864c4e26f9e38905bad2498ae02dc1b767f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_12:37:27_842e99f1b9ee2a0fa239997ef695c5ed0bd77195/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-06_12:37:27_842e99f1b9ee2a0fa239997ef695c5ed0bd77195/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-06_12:37:27_842e99f1b9ee2a0fa239997ef695c5ed0bd77195/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 03e3c703ed21af9fd67fd05d0aec1938b1f375e8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_12:37:27_842e99f1b9ee2a0fa239997ef695c5ed0bd77195/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.070976,0.00334,299.0 diff --git a/raw_results/2023-09-06_12:37:27_842e99f1b9ee2a0fa239997ef695c5ed0bd77195/pytorch_bert_inference/0/main.log b/raw_results/2023-09-06_12:37:27_842e99f1b9ee2a0fa239997ef695c5ed0bd77195/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
4fd22aafd904cdc3e8d7506376a2a1d903e26162..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_12:37:27_842e99f1b9ee2a0fa239997ef695c5ed0bd77195/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-06 13:01:20,741][benchmark][INFO] - Configuring inference benchmark -[2023-09-06 13:01:20,741][benchmark][INFO] - + Setting seed(42) -[2023-09-06 13:01:21,987][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-06 13:01:21,988][backend][INFO] - Configuring pytorch backend -[2023-09-06 13:01:21,988][backend][INFO] - + Checking initial device isolation -[2023-09-06 13:01:21,988][backend][INFO] - + Checking contineous device isolation -[2023-09-06 13:01:21,988][pytorch][INFO] - + Disabling gradients -[2023-09-06 13:01:21,988][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-06 13:01:22,600][pytorch][INFO] - + Turning on eval mode -[2023-09-06 13:01:22,601][inference][INFO] - Running inference benchmark -[2023-09-06 13:01:22,722][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-06 13:01:22,723][inference][INFO] - + Tracking forward pass peak memory -[2023-09-06 13:01:22,782][inference][INFO] - + Forward pass peak memory: 467.070976 (MB) -[2023-09-06 13:01:22,783][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-06 13:01:22,785][inference][INFO] - + Warming up the forward pass -[2023-09-06 13:01:22,823][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-06 13:01:27,875][inference][INFO] - + Forward pass latency: 3.34e-03 (s) -[2023-09-06 13:01:27,877][inference][INFO] - + Forward pass throughput: 299.00 (samples/s) -[2023-09-06 13:01:27,877][inference][INFO] - Saving inference results -[2023-09-06 13:01:27,889][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-06_12:37:27_842e99f1b9ee2a0fa239997ef695c5ed0bd77195/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-06_12:37:27_842e99f1b9ee2a0fa239997ef695c5ed0bd77195/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 344ec2f935a31a19a62e706f12f4bb8e09c4c386..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_12:37:27_842e99f1b9ee2a0fa239997ef695c5ed0bd77195/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-06_12:37:27_842e99f1b9ee2a0fa239997ef695c5ed0bd77195/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-06_12:37:27_842e99f1b9ee2a0fa239997ef695c5ed0bd77195/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 46059782872c9f30c8d1b6848ec233218f045d08..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_12:37:27_842e99f1b9ee2a0fa239997ef695c5ed0bd77195/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
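Each deleted inference_results.csv holds a single row per sweep index. A sketch of how such files could be aggregated across commits for regression comparison (the glob pattern and path layout are assumptions based on the paths above):

    # Sketch: gather the one-row CSVs into a single frame keyed by path parts.
    import glob
    import pandas as pd

    frames = []
    for path in glob.glob("raw_results/*/*/*/inference_results.csv"):
        _, commit, experiment, run, _ = path.split("/")
        df = pd.read_csv(path, index_col=0)
        df["commit"], df["experiment"], df["run"] = commit, experiment, run
        frames.append(df)

    results = pd.concat(frames)
    print(results.groupby(["experiment", "run"])["forward.latency(s)"].describe())
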
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-06_12:37:27_842e99f1b9ee2a0fa239997ef695c5ed0bd77195/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-06_12:37:27_842e99f1b9ee2a0fa239997ef695c5ed0bd77195/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-09-06_12:37:27_842e99f1b9ee2a0fa239997ef695c5ed0bd77195/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_12:37:27_842e99f1b9ee2a0fa239997ef695c5ed0bd77195/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-06_12:37:27_842e99f1b9ee2a0fa239997ef695c5ed0bd77195/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-06_12:37:27_842e99f1b9ee2a0fa239997ef695c5ed0bd77195/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 9af0e66383a2862a3d036ea4917e3bacabb881ae..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_12:37:27_842e99f1b9ee2a0fa239997ef695c5ed0bd77195/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-06_12:37:27_842e99f1b9ee2a0fa239997ef695c5ed0bd77195/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-06_12:37:27_842e99f1b9ee2a0fa239997ef695c5ed0bd77195/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index b97f06c5688d601d188bfee125b13b3e57d65c33..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_12:37:27_842e99f1b9ee2a0fa239997ef695c5ed0bd77195/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.984384,0.00359,1110.0 diff --git a/raw_results/2023-09-06_12:37:27_842e99f1b9ee2a0fa239997ef695c5ed0bd77195/pytorch_bert_inference/1/main.log b/raw_results/2023-09-06_12:37:27_842e99f1b9ee2a0fa239997ef695c5ed0bd77195/pytorch_bert_inference/1/main.log deleted file mode 100644 index ccf0e1d8186f2e0dcbad212cfde278065ad28806..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_12:37:27_842e99f1b9ee2a0fa239997ef695c5ed0bd77195/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-06 13:01:28,283][benchmark][INFO] - Configuring inference benchmark -[2023-09-06 13:01:28,284][benchmark][INFO] - + Setting seed(42) -[2023-09-06 13:01:28,724][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-06 13:01:28,725][backend][INFO] - Configuring pytorch backend -[2023-09-06 13:01:28,725][backend][INFO] - + Checking initial device isolation -[2023-09-06 13:01:28,725][backend][INFO] - + Checking contineous device isolation -[2023-09-06 13:01:28,725][pytorch][INFO] - + Disabling gradients -[2023-09-06 13:01:28,725][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-06 13:01:28,843][pytorch][INFO] - + Turning on eval mode -[2023-09-06 13:01:28,843][inference][INFO] - Running inference benchmark -[2023-09-06 13:01:28,961][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-06 13:01:28,962][inference][INFO] - + Tracking forward pass 
peak memory -[2023-09-06 13:01:29,002][inference][INFO] - + Forward pass peak memory: 467.984384 (MB) -[2023-09-06 13:01:29,003][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-06 13:01:29,005][inference][INFO] - + Warming up the forward pass -[2023-09-06 13:01:29,041][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-06 13:01:34,088][inference][INFO] - + Forward pass latency: 3.59e-03 (s) -[2023-09-06 13:01:34,089][inference][INFO] - + Forward pass throughput: 1110.00 (samples/s) -[2023-09-06 13:01:34,089][inference][INFO] - Saving inference results -[2023-09-06 13:01:34,097][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-06_12:37:27_842e99f1b9ee2a0fa239997ef695c5ed0bd77195/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-06_12:37:27_842e99f1b9ee2a0fa239997ef695c5ed0bd77195/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 7e32a35f367f532a16d65c94e459c4f30c9188c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_12:37:27_842e99f1b9ee2a0fa239997ef695c5ed0bd77195/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-06_12:37:27_842e99f1b9ee2a0fa239997ef695c5ed0bd77195/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-06_12:37:27_842e99f1b9ee2a0fa239997ef695c5ed0bd77195/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 8fd0e294965236aafed911ebb10d2a2a072e2122..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_12:37:27_842e99f1b9ee2a0fa239997ef695c5ed0bd77195/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-06_12:37:27_842e99f1b9ee2a0fa239997ef695c5ed0bd77195/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-06_12:37:27_842e99f1b9ee2a0fa239997ef695c5ed0bd77195/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-09-06_12:37:27_842e99f1b9ee2a0fa239997ef695c5ed0bd77195/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_12:37:27_842e99f1b9ee2a0fa239997ef695c5ed0bd77195/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-06_12:37:27_842e99f1b9ee2a0fa239997ef695c5ed0bd77195/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-06_12:37:27_842e99f1b9ee2a0fa239997ef695c5ed0bd77195/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 5cf5f1ff54ffd6523449b409066f7697ce8772db..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_12:37:27_842e99f1b9ee2a0fa239997ef695c5ed0bd77195/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-06_12:37:27_842e99f1b9ee2a0fa239997ef695c5ed0bd77195/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-06_12:37:27_842e99f1b9ee2a0fa239997ef695c5ed0bd77195/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index d681f234e6aebaa45086489f0296bf998f24e0a4..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_12:37:27_842e99f1b9ee2a0fa239997ef695c5ed0bd77195/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.21318399999996,0.00364,275.0,0.497,201.0 diff --git a/raw_results/2023-09-06_12:37:27_842e99f1b9ee2a0fa239997ef695c5ed0bd77195/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-06_12:37:27_842e99f1b9ee2a0fa239997ef695c5ed0bd77195/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 20e6738ba2fcef95c88ba90a80a366055054fdd8..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-09-06_12:37:27_842e99f1b9ee2a0fa239997ef695c5ed0bd77195/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-06 13:01:39,186][benchmark][INFO] - Configuring inference benchmark -[2023-09-06 13:01:39,187][benchmark][INFO] - + Setting seed(42) -[2023-09-06 13:01:40,643][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-06 13:01:40,643][backend][INFO] - Configuring pytorch backend -[2023-09-06 13:01:40,643][backend][INFO] - + Checking initial device isolation -[2023-09-06 13:01:40,644][backend][INFO] - + Checking contineous device isolation -[2023-09-06 13:01:40,644][pytorch][INFO] - + Disabling gradients -[2023-09-06 13:01:40,644][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-06 13:01:41,283][pytorch][INFO] - + Turning on eval mode -[2023-09-06 13:01:41,284][inference][INFO] - Running inference benchmark -[2023-09-06 13:01:41,480][inference][INFO] - + Tracking forward pass peak memory -[2023-09-06 13:01:41,526][inference][INFO] - + Forward pass peak memory: 469.21318399999996 (MB) -[2023-09-06 13:01:41,528][inference][INFO] - + Warming up the forward pass -[2023-09-06 13:01:41,564][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-06 13:01:46,612][inference][INFO] - + Forward pass latency: 3.64e-03 (s) -[2023-09-06 13:01:46,614][inference][INFO] - + Forward pass throughput: 275.00 (samples/s) -[2023-09-06 13:01:46,615][inference][INFO] - + Warming up the generation pass -[2023-09-06 13:01:47,104][inference][INFO] - + Tracking generation latency and throughput -[2023-09-06 13:01:52,570][inference][INFO] - + Generation pass latency: 4.97e-01 (s) -[2023-09-06 13:01:52,571][inference][INFO] - + Generation pass throughput: 201.00 (tokens/s) -[2023-09-06 13:01:52,571][inference][INFO] - Saving inference results -[2023-09-06 13:01:52,584][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-06_13:07:29_3e203f92bed937fa13c35adee1bdc45a92d18e61/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-06_13:07:29_3e203f92bed937fa13c35adee1bdc45a92d18e61/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index d75847bed7772db4647c7b58e851f1868b48e619..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_13:07:29_3e203f92bed937fa13c35adee1bdc45a92d18e61/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
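A quick consistency check on the pytorch_gpt2_inference/0 numbers above: the recorded throughputs look like batch_size / forward latency and new_tokens / generation latency, rounded. A minimal standalone sketch (values copied from the CSV row; nothing here is recomputed by the benchmark itself):

batch_size = 1                # benchmark.input_shapes.batch_size
new_tokens = 100              # benchmark.input_shapes.new_tokens
forward_latency_s = 0.00364   # forward.latency(s)
generate_latency_s = 0.497    # generate.latency(s)

# ~274.7 samples/s and ~201.2 tokens/s, matching the rounded 275.0 / 201.0
print(round(batch_size / forward_latency_s))
print(round(new_tokens / generate_latency_s))
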
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-06_13:07:29_3e203f92bed937fa13c35adee1bdc45a92d18e61/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-06_13:07:29_3e203f92bed937fa13c35adee1bdc45a92d18e61/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 51f0d31d15c43121a487439004a55f60b209a2bb..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_13:07:29_3e203f92bed937fa13c35adee1bdc45a92d18e61/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-06_13:07:29_3e203f92bed937fa13c35adee1bdc45a92d18e61/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-06_13:07:29_3e203f92bed937fa13c35adee1bdc45a92d18e61/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-06_13:07:29_3e203f92bed937fa13c35adee1bdc45a92d18e61/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_13:07:29_3e203f92bed937fa13c35adee1bdc45a92d18e61/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-06_13:07:29_3e203f92bed937fa13c35adee1bdc45a92d18e61/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-06_13:07:29_3e203f92bed937fa13c35adee1bdc45a92d18e61/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 36a53864c4e26f9e38905bad2498ae02dc1b767f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_13:07:29_3e203f92bed937fa13c35adee1bdc45a92d18e61/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
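The hydra.yaml above records mode: MULTIRUN with a basic sweeper over benchmark.input_shapes.batch_size: 1,4, which is what produces the numbered job subdirectories 0 and 1. A sketch of relaunching the same sweep (the main.py file name is an assumption inferred from the job name 'main'; the Hydra flags themselves are standard):

import subprocess

# Run from the repo root, e.g. /home/user/transformers-regression
subprocess.run(
    [
        "python", "main.py",                      # hydra job name is 'main' (assumed file name)
        "--multirun",                             # hydra: mode: MULTIRUN
        "--config-name", "bert_cpu_inference",    # hydra: job.config_name
        "benchmark.input_shapes.batch_size=1,4",  # hydra: sweeper.params
    ],
    check=True,
)
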
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-06_13:07:29_3e203f92bed937fa13c35adee1bdc45a92d18e61/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-06_13:07:29_3e203f92bed937fa13c35adee1bdc45a92d18e61/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index e8517d2fbafbe4df49e6963f5108c8ab83bcbfea..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_13:07:29_3e203f92bed937fa13c35adee1bdc45a92d18e61/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.976768,0.00327,306.0 diff --git a/raw_results/2023-09-06_13:07:29_3e203f92bed937fa13c35adee1bdc45a92d18e61/pytorch_bert_inference/0/main.log b/raw_results/2023-09-06_13:07:29_3e203f92bed937fa13c35adee1bdc45a92d18e61/pytorch_bert_inference/0/main.log deleted file mode 100644 index 1031dfbca2f362b912c41bf181593534eae562ce..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_13:07:29_3e203f92bed937fa13c35adee1bdc45a92d18e61/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-06 14:49:40,981][benchmark][INFO] - Configuring inference benchmark -[2023-09-06 14:49:40,982][benchmark][INFO] - + Setting seed(42) -[2023-09-06 14:49:42,384][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-06 14:49:42,384][backend][INFO] - Configuring pytorch backend -[2023-09-06 14:49:42,384][backend][INFO] - + Checking initial device isolation -[2023-09-06 14:49:42,384][backend][INFO] - + Checking contineous device isolation -[2023-09-06 14:49:42,385][pytorch][INFO] - + Disabling gradients -[2023-09-06 14:49:42,385][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-06 14:49:42,988][pytorch][INFO] - + Turning on eval mode -[2023-09-06 14:49:42,989][inference][INFO] - Running inference benchmark -[2023-09-06 14:49:43,107][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-06 14:49:43,108][inference][INFO] - + Tracking forward pass peak 
memory -[2023-09-06 14:49:43,167][inference][INFO] - + Forward pass peak memory: 466.976768 (MB) -[2023-09-06 14:49:43,168][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-06 14:49:43,170][inference][INFO] - + Warming up the forward pass -[2023-09-06 14:49:43,207][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-06 14:49:48,255][inference][INFO] - + Forward pass latency: 3.27e-03 (s) -[2023-09-06 14:49:48,257][inference][INFO] - + Forward pass throughput: 306.00 (samples/s) -[2023-09-06 14:49:48,257][inference][INFO] - Saving inference results -[2023-09-06 14:49:48,269][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-06_13:07:29_3e203f92bed937fa13c35adee1bdc45a92d18e61/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-06_13:07:29_3e203f92bed937fa13c35adee1bdc45a92d18e61/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 344ec2f935a31a19a62e706f12f4bb8e09c4c386..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_13:07:29_3e203f92bed937fa13c35adee1bdc45a92d18e61/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-06_13:07:29_3e203f92bed937fa13c35adee1bdc45a92d18e61/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-06_13:07:29_3e203f92bed937fa13c35adee1bdc45a92d18e61/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index efb4d0952f987f66a07ed122cc221a64c418f8e7..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_13:07:29_3e203f92bed937fa13c35adee1bdc45a92d18e61/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-06_13:07:29_3e203f92bed937fa13c35adee1bdc45a92d18e61/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-06_13:07:29_3e203f92bed937fa13c35adee1bdc45a92d18e61/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-09-06_13:07:29_3e203f92bed937fa13c35adee1bdc45a92d18e61/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_13:07:29_3e203f92bed937fa13c35adee1bdc45a92d18e61/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-06_13:07:29_3e203f92bed937fa13c35adee1bdc45a92d18e61/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-06_13:07:29_3e203f92bed937fa13c35adee1bdc45a92d18e61/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 9af0e66383a2862a3d036ea4917e3bacabb881ae..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_13:07:29_3e203f92bed937fa13c35adee1bdc45a92d18e61/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-06_13:07:29_3e203f92bed937fa13c35adee1bdc45a92d18e61/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-06_13:07:29_3e203f92bed937fa13c35adee1bdc45a92d18e61/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 21c785b596f584694ff225cded00f0f95f3a4ce4..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_13:07:29_3e203f92bed937fa13c35adee1bdc45a92d18e61/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.97619199999997,0.00363,1100.0 diff --git a/raw_results/2023-09-06_13:07:29_3e203f92bed937fa13c35adee1bdc45a92d18e61/pytorch_bert_inference/1/main.log b/raw_results/2023-09-06_13:07:29_3e203f92bed937fa13c35adee1bdc45a92d18e61/pytorch_bert_inference/1/main.log deleted file mode 100644 index e9ecbaf76e9582d044ddec03a2364881dbec2ef2..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-09-06_13:07:29_3e203f92bed937fa13c35adee1bdc45a92d18e61/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-06 14:49:48,645][benchmark][INFO] - Configuring inference benchmark -[2023-09-06 14:49:48,646][benchmark][INFO] - + Setting seed(42) -[2023-09-06 14:49:49,075][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-06 14:49:49,076][backend][INFO] - Configuring pytorch backend -[2023-09-06 14:49:49,076][backend][INFO] - + Checking initial device isolation -[2023-09-06 14:49:49,076][backend][INFO] - + Checking contineous device isolation -[2023-09-06 14:49:49,076][pytorch][INFO] - + Disabling gradients -[2023-09-06 14:49:49,076][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-06 14:49:49,193][pytorch][INFO] - + Turning on eval mode -[2023-09-06 14:49:49,193][inference][INFO] - Running inference benchmark -[2023-09-06 14:49:49,319][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-06 14:49:49,320][inference][INFO] - + Tracking forward pass peak memory -[2023-09-06 14:49:49,362][inference][INFO] - + Forward pass peak memory: 467.97619199999997 (MB) -[2023-09-06 14:49:49,363][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-06 14:49:49,364][inference][INFO] - + Warming up the forward pass -[2023-09-06 14:49:49,415][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-06 14:49:54,459][inference][INFO] - + Forward pass latency: 3.63e-03 (s) -[2023-09-06 14:49:54,460][inference][INFO] - + Forward pass throughput: 1100.00 (samples/s) -[2023-09-06 14:49:54,460][inference][INFO] - Saving inference results -[2023-09-06 14:49:54,466][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-06_13:07:29_3e203f92bed937fa13c35adee1bdc45a92d18e61/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-06_13:07:29_3e203f92bed937fa13c35adee1bdc45a92d18e61/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 7e32a35f367f532a16d65c94e459c4f30c9188c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_13:07:29_3e203f92bed937fa13c35adee1bdc45a92d18e61/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference 
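Each inference_results.csv in this dump is a single-row table whose unnamed first column is the row index, so it loads directly with pandas. For example, for the bert batch-4 run above:

import pandas as pd

df = pd.read_csv(
    "raw_results/2023-09-06_13:07:29_3e203f92bed937fa13c35adee1bdc45a92d18e61/"
    "pytorch_bert_inference/1/inference_results.csv",
    index_col=0,  # the leading unnamed column
)
print(df["forward.latency(s)"].iloc[0])             # 0.00363
print(df["forward.throughput(samples/s)"].iloc[0])  # 1100.0
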
-model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-06_13:07:29_3e203f92bed937fa13c35adee1bdc45a92d18e61/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-06_13:07:29_3e203f92bed937fa13c35adee1bdc45a92d18e61/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 1236ba6cc6ba6841adaf7b6fd90cb4cf798ae02b..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_13:07:29_3e203f92bed937fa13c35adee1bdc45a92d18e61/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-06_13:07:29_3e203f92bed937fa13c35adee1bdc45a92d18e61/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-06_13:07:29_3e203f92bed937fa13c35adee1bdc45a92d18e61/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-09-06_13:07:29_3e203f92bed937fa13c35adee1bdc45a92d18e61/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_13:07:29_3e203f92bed937fa13c35adee1bdc45a92d18e61/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-06_13:07:29_3e203f92bed937fa13c35adee1bdc45a92d18e61/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-06_13:07:29_3e203f92bed937fa13c35adee1bdc45a92d18e61/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 5cf5f1ff54ffd6523449b409066f7697ce8772db..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_13:07:29_3e203f92bed937fa13c35adee1bdc45a92d18e61/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
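Note the difference between each .config/config.yaml and its hydra_config.yaml: the former keeps the raw interpolation disable_grad: ${is_inference:${benchmark.name}}, the latter stores the resolved value true. A sketch of how such an OmegaConf resolver behaves (the resolver body is an assumption; only its name and resolved result are visible in these files):

from omegaconf import OmegaConf

# Hypothetical resolver: true exactly when the benchmark is 'inference'.
OmegaConf.register_new_resolver("is_inference", lambda name: name == "inference")

cfg = OmegaConf.create({
    "benchmark": {"name": "inference"},
    "backend": {"disable_grad": "${is_inference:${benchmark.name}}"},
})
print(cfg.backend.disable_grad)  # True
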
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-06_13:07:29_3e203f92bed937fa13c35adee1bdc45a92d18e61/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-06_13:07:29_3e203f92bed937fa13c35adee1bdc45a92d18e61/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 34831b8b2cae2b6a6b62bda51e900d92c9a7c63c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_13:07:29_3e203f92bed937fa13c35adee1bdc45a92d18e61/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,468.860928,0.00417,240.0,0.481,208.0 diff --git a/raw_results/2023-09-06_13:07:29_3e203f92bed937fa13c35adee1bdc45a92d18e61/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-06_13:07:29_3e203f92bed937fa13c35adee1bdc45a92d18e61/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 08a572ad24f62f91243c74fee96cb9258e2696f1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_13:07:29_3e203f92bed937fa13c35adee1bdc45a92d18e61/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-06 14:49:59,388][benchmark][INFO] - Configuring inference benchmark -[2023-09-06 14:49:59,389][benchmark][INFO] - + Setting seed(42) -[2023-09-06 14:50:00,930][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-06 14:50:00,930][backend][INFO] - Configuring pytorch backend -[2023-09-06 14:50:00,930][backend][INFO] - + Checking initial device isolation -[2023-09-06 14:50:00,930][backend][INFO] - + Checking contineous device isolation -[2023-09-06 14:50:00,930][pytorch][INFO] - + Disabling gradients -[2023-09-06 14:50:00,931][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-06 14:50:01,562][pytorch][INFO] - + Turning on eval mode -[2023-09-06 14:50:01,562][inference][INFO] - Running inference benchmark -[2023-09-06 14:50:01,753][inference][INFO] - + Tracking forward pass peak memory -[2023-09-06 14:50:01,804][inference][INFO] - + Forward pass peak memory: 468.860928 (MB) -[2023-09-06 14:50:01,806][inference][INFO] - + Warming up the forward pass 
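The timestamps in these logs (about five seconds between "Tracking forward pass latency" and the reported latency, matching benchmark_duration: 5) are consistent with a duration-bounded timing loop run after warmup_runs discarded iterations. An illustrative sketch only, not the library's actual implementation:

import time

def measure_forward(forward, warmup_runs=10, benchmark_duration=5.0, batch_size=1):
    for _ in range(warmup_runs):      # warmup iterations, timings discarded
        forward()
    n, start = 0, time.perf_counter()
    while time.perf_counter() - start < benchmark_duration:
        forward()
        n += 1
    latency = (time.perf_counter() - start) / n
    return latency, batch_size / latency  # mean latency (s), throughput (samples/s)
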
-[2023-09-06 14:50:01,842][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-06 14:50:06,892][inference][INFO] - + Forward pass latency: 4.17e-03 (s) -[2023-09-06 14:50:06,894][inference][INFO] - + Forward pass throughput: 240.00 (samples/s) -[2023-09-06 14:50:06,895][inference][INFO] - + Warming up the generation pass -[2023-09-06 14:50:07,402][inference][INFO] - + Tracking generation latency and throughput -[2023-09-06 14:50:12,695][inference][INFO] - + Generation pass latency: 4.81e-01 (s) -[2023-09-06 14:50:12,697][inference][INFO] - + Generation pass throughput: 208.00 (tokens/s) -[2023-09-06 14:50:12,697][inference][INFO] - Saving inference results -[2023-09-06 14:50:12,708][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-06_14:40:03_fa522d8d7ba512d1e103f891263602ee3f2bd46d/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-06_14:40:03_fa522d8d7ba512d1e103f891263602ee3f2bd46d/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index d75847bed7772db4647c7b58e851f1868b48e619..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_14:40:03_fa522d8d7ba512d1e103f891263602ee3f2bd46d/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-06_14:40:03_fa522d8d7ba512d1e103f891263602ee3f2bd46d/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-06_14:40:03_fa522d8d7ba512d1e103f891263602ee3f2bd46d/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 1d791b6eddc4bf86fb0c95104060906f5c8a8669..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_14:40:03_fa522d8d7ba512d1e103f891263602ee3f2bd46d/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-06_14:40:03_fa522d8d7ba512d1e103f891263602ee3f2bd46d/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-09-06_14:40:03_fa522d8d7ba512d1e103f891263602ee3f2bd46d/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-06_14:40:03_fa522d8d7ba512d1e103f891263602ee3f2bd46d/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_14:40:03_fa522d8d7ba512d1e103f891263602ee3f2bd46d/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-06_14:40:03_fa522d8d7ba512d1e103f891263602ee3f2bd46d/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-06_14:40:03_fa522d8d7ba512d1e103f891263602ee3f2bd46d/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 36a53864c4e26f9e38905bad2498ae02dc1b767f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_14:40:03_fa522d8d7ba512d1e103f891263602ee3f2bd46d/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-06_14:40:03_fa522d8d7ba512d1e103f891263602ee3f2bd46d/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-06_14:40:03_fa522d8d7ba512d1e103f891263602ee3f2bd46d/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 0f4ca79d2d963fc04d03363216031248d3a9ea66..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_14:40:03_fa522d8d7ba512d1e103f891263602ee3f2bd46d/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.644992,0.00386,259.0 diff --git a/raw_results/2023-09-06_14:40:03_fa522d8d7ba512d1e103f891263602ee3f2bd46d/pytorch_bert_inference/0/main.log b/raw_results/2023-09-06_14:40:03_fa522d8d7ba512d1e103f891263602ee3f2bd46d/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
02a89ba5f7ca4fae6944e746667fc80204e5c1a5..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_14:40:03_fa522d8d7ba512d1e103f891263602ee3f2bd46d/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-06 14:51:24,704][benchmark][INFO] - Configuring inference benchmark -[2023-09-06 14:51:24,705][benchmark][INFO] - + Setting seed(42) -[2023-09-06 14:51:25,956][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-06 14:51:25,956][backend][INFO] - Configuring pytorch backend -[2023-09-06 14:51:25,957][backend][INFO] - + Checking initial device isolation -[2023-09-06 14:51:25,957][backend][INFO] - + Checking contineous device isolation -[2023-09-06 14:51:25,957][pytorch][INFO] - + Disabling gradients -[2023-09-06 14:51:25,957][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-06 14:51:26,564][pytorch][INFO] - + Turning on eval mode -[2023-09-06 14:51:26,565][inference][INFO] - Running inference benchmark -[2023-09-06 14:51:26,685][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-06 14:51:26,687][inference][INFO] - + Tracking forward pass peak memory -[2023-09-06 14:51:26,743][inference][INFO] - + Forward pass peak memory: 466.644992 (MB) -[2023-09-06 14:51:26,744][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-06 14:51:26,746][inference][INFO] - + Warming up the forward pass -[2023-09-06 14:51:26,782][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-06 14:51:31,829][inference][INFO] - + Forward pass latency: 3.86e-03 (s) -[2023-09-06 14:51:31,831][inference][INFO] - + Forward pass throughput: 259.00 (samples/s) -[2023-09-06 14:51:31,831][inference][INFO] - Saving inference results -[2023-09-06 14:51:31,842][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-06_14:40:03_fa522d8d7ba512d1e103f891263602ee3f2bd46d/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-06_14:40:03_fa522d8d7ba512d1e103f891263602ee3f2bd46d/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 344ec2f935a31a19a62e706f12f4bb8e09c4c386..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_14:40:03_fa522d8d7ba512d1e103f891263602ee3f2bd46d/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
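Since every commit's results live under raw_results/<date>_<sha>/<experiment>/<job_id>/, the per-commit trend for one series can be rebuilt with a glob over that layout:

import glob
import pandas as pd

for path in sorted(glob.glob(
        "raw_results/*/pytorch_bert_inference/0/inference_results.csv")):
    commit = path.split("/")[1]  # e.g. 2023-09-06_14:40:03_fa522d8d...
    df = pd.read_csv(path, index_col=0)
    print(commit, df["forward.latency(s)"].iloc[0])
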
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-06_14:40:03_fa522d8d7ba512d1e103f891263602ee3f2bd46d/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-06_14:40:03_fa522d8d7ba512d1e103f891263602ee3f2bd46d/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index a7a29273f0ef4417bbb2b4a71128c1dc42e14e88..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_14:40:03_fa522d8d7ba512d1e103f891263602ee3f2bd46d/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-06_14:40:03_fa522d8d7ba512d1e103f891263602ee3f2bd46d/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-06_14:40:03_fa522d8d7ba512d1e103f891263602ee3f2bd46d/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-09-06_14:40:03_fa522d8d7ba512d1e103f891263602ee3f2bd46d/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_14:40:03_fa522d8d7ba512d1e103f891263602ee3f2bd46d/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-06_14:40:03_fa522d8d7ba512d1e103f891263602ee3f2bd46d/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-06_14:40:03_fa522d8d7ba512d1e103f891263602ee3f2bd46d/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 9af0e66383a2862a3d036ea4917e3bacabb881ae..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_14:40:03_fa522d8d7ba512d1e103f891263602ee3f2bd46d/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-06_14:40:03_fa522d8d7ba512d1e103f891263602ee3f2bd46d/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-06_14:40:03_fa522d8d7ba512d1e103f891263602ee3f2bd46d/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 95d31750bc7a10eaeaddbaac4586707f01783fe3..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_14:40:03_fa522d8d7ba512d1e103f891263602ee3f2bd46d/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.61983999999995,0.00429,932.0 diff --git a/raw_results/2023-09-06_14:40:03_fa522d8d7ba512d1e103f891263602ee3f2bd46d/pytorch_bert_inference/1/main.log b/raw_results/2023-09-06_14:40:03_fa522d8d7ba512d1e103f891263602ee3f2bd46d/pytorch_bert_inference/1/main.log deleted file mode 100644 index b578887d4dbb54f957b1a469105a799768a35423..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_14:40:03_fa522d8d7ba512d1e103f891263602ee3f2bd46d/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-06 14:51:32,233][benchmark][INFO] - Configuring inference benchmark -[2023-09-06 14:51:32,233][benchmark][INFO] - + Setting seed(42) -[2023-09-06 14:51:32,681][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-06 14:51:32,681][backend][INFO] - Configuring pytorch backend -[2023-09-06 14:51:32,681][backend][INFO] - + Checking initial device isolation -[2023-09-06 14:51:32,681][backend][INFO] - + Checking contineous device isolation -[2023-09-06 14:51:32,681][pytorch][INFO] - + Disabling gradients -[2023-09-06 14:51:32,682][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-06 14:51:32,800][pytorch][INFO] - + Turning on eval mode -[2023-09-06 14:51:32,801][inference][INFO] - Running inference benchmark -[2023-09-06 14:51:32,926][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-06 14:51:32,927][inference][INFO] - + Tracking forward 
pass peak memory -[2023-09-06 14:51:32,966][inference][INFO] - + Forward pass peak memory: 467.61983999999995 (MB) -[2023-09-06 14:51:32,967][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-06 14:51:32,968][inference][INFO] - + Warming up the forward pass -[2023-09-06 14:51:33,011][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-06 14:51:38,052][inference][INFO] - + Forward pass latency: 4.29e-03 (s) -[2023-09-06 14:51:38,053][inference][INFO] - + Forward pass throughput: 932.00 (samples/s) -[2023-09-06 14:51:38,053][inference][INFO] - Saving inference results -[2023-09-06 14:51:38,060][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-06_14:40:03_fa522d8d7ba512d1e103f891263602ee3f2bd46d/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-06_14:40:03_fa522d8d7ba512d1e103f891263602ee3f2bd46d/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 7e32a35f367f532a16d65c94e459c4f30c9188c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_14:40:03_fa522d8d7ba512d1e103f891263602ee3f2bd46d/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-06_14:40:03_fa522d8d7ba512d1e103f891263602ee3f2bd46d/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-06_14:40:03_fa522d8d7ba512d1e103f891263602ee3f2bd46d/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index bfb6290f77e9860fc1545a82758df6c35b17bd5f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_14:40:03_fa522d8d7ba512d1e103f891263602ee3f2bd46d/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - 
launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-06_14:40:03_fa522d8d7ba512d1e103f891263602ee3f2bd46d/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-06_14:40:03_fa522d8d7ba512d1e103f891263602ee3f2bd46d/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-09-06_14:40:03_fa522d8d7ba512d1e103f891263602ee3f2bd46d/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_14:40:03_fa522d8d7ba512d1e103f891263602ee3f2bd46d/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-06_14:40:03_fa522d8d7ba512d1e103f891263602ee3f2bd46d/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-06_14:40:03_fa522d8d7ba512d1e103f891263602ee3f2bd46d/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 5cf5f1ff54ffd6523449b409066f7697ce8772db..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_14:40:03_fa522d8d7ba512d1e103f891263602ee3f2bd46d/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-06_14:40:03_fa522d8d7ba512d1e103f891263602ee3f2bd46d/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-06_14:40:03_fa522d8d7ba512d1e103f891263602ee3f2bd46d/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 246326bf31f0c8887b1323b05b46ea25af3610ad..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_14:40:03_fa522d8d7ba512d1e103f891263602ee3f2bd46d/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.192704,0.00379,264.0,0.504,198.0 diff --git a/raw_results/2023-09-06_14:40:03_fa522d8d7ba512d1e103f891263602ee3f2bd46d/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-06_14:40:03_fa522d8d7ba512d1e103f891263602ee3f2bd46d/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index f55e7e9e3d31d455ca3c42b019142796d455ba6f..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-09-06_14:40:03_fa522d8d7ba512d1e103f891263602ee3f2bd46d/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-06 14:51:43,152][benchmark][INFO] - Configuring inference benchmark -[2023-09-06 14:51:43,153][benchmark][INFO] - + Setting seed(42) -[2023-09-06 14:51:44,820][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-06 14:51:44,821][backend][INFO] - Configuring pytorch backend -[2023-09-06 14:51:44,821][backend][INFO] - + Checking initial device isolation -[2023-09-06 14:51:44,821][backend][INFO] - + Checking contineous device isolation -[2023-09-06 14:51:44,821][pytorch][INFO] - + Disabling gradients -[2023-09-06 14:51:44,821][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-06 14:51:45,464][pytorch][INFO] - + Turning on eval mode -[2023-09-06 14:51:45,465][inference][INFO] - Running inference benchmark -[2023-09-06 14:51:45,661][inference][INFO] - + Tracking forward pass peak memory -[2023-09-06 14:51:45,706][inference][INFO] - + Forward pass peak memory: 469.192704 (MB) -[2023-09-06 14:51:45,707][inference][INFO] - + Warming up the forward pass -[2023-09-06 14:51:45,749][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-06 14:51:50,792][inference][INFO] - + Forward pass latency: 3.79e-03 (s) -[2023-09-06 14:51:50,794][inference][INFO] - + Forward pass throughput: 264.00 (samples/s) -[2023-09-06 14:51:50,795][inference][INFO] - + Warming up the generation pass -[2023-09-06 14:51:51,380][inference][INFO] - + Tracking generation latency and throughput -[2023-09-06 14:51:56,426][inference][INFO] - + Generation pass latency: 5.04e-01 (s) -[2023-09-06 14:51:56,427][inference][INFO] - + Generation pass throughput: 198.00 (tokens/s) -[2023-09-06 14:51:56,427][inference][INFO] - Saving inference results -[2023-09-06 14:51:56,438][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-06_14:49:04_300d6a4a62aac89b3f439110561d5a2268ffad9e/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-06_14:49:04_300d6a4a62aac89b3f439110561d5a2268ffad9e/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index d75847bed7772db4647c7b58e851f1868b48e619..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_14:49:04_300d6a4a62aac89b3f439110561d5a2268ffad9e/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-06_14:49:04_300d6a4a62aac89b3f439110561d5a2268ffad9e/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-06_14:49:04_300d6a4a62aac89b3f439110561d5a2268ffad9e/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index d85672fa03e692457ffa25153f9006087cba94df..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_14:49:04_300d6a4a62aac89b3f439110561d5a2268ffad9e/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-06_14:49:04_300d6a4a62aac89b3f439110561d5a2268ffad9e/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-06_14:49:04_300d6a4a62aac89b3f439110561d5a2268ffad9e/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-06_14:49:04_300d6a4a62aac89b3f439110561d5a2268ffad9e/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_14:49:04_300d6a4a62aac89b3f439110561d5a2268ffad9e/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-06_14:49:04_300d6a4a62aac89b3f439110561d5a2268ffad9e/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-06_14:49:04_300d6a4a62aac89b3f439110561d5a2268ffad9e/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 36a53864c4e26f9e38905bad2498ae02dc1b767f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_14:49:04_300d6a4a62aac89b3f439110561d5a2268ffad9e/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-06_14:49:04_300d6a4a62aac89b3f439110561d5a2268ffad9e/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-06_14:49:04_300d6a4a62aac89b3f439110561d5a2268ffad9e/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 106bbc5cf120da0b9c48beffbb588ec7ff048cb0..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_14:49:04_300d6a4a62aac89b3f439110561d5a2268ffad9e/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.751488,0.0032,312.0 diff --git a/raw_results/2023-09-06_14:49:04_300d6a4a62aac89b3f439110561d5a2268ffad9e/pytorch_bert_inference/0/main.log b/raw_results/2023-09-06_14:49:04_300d6a4a62aac89b3f439110561d5a2268ffad9e/pytorch_bert_inference/0/main.log deleted file mode 100644 index d55b1765a2943b6d58263504ae2f77c3d8d16caf..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_14:49:04_300d6a4a62aac89b3f439110561d5a2268ffad9e/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-06 16:50:03,336][benchmark][INFO] - Configuring inference benchmark -[2023-09-06 16:50:03,337][benchmark][INFO] - + Setting seed(42) -[2023-09-06 16:50:04,995][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-06 16:50:04,995][backend][INFO] - Configuring pytorch backend -[2023-09-06 16:50:04,995][backend][INFO] - + Checking initial device isolation -[2023-09-06 16:50:04,996][backend][INFO] - + Checking contineous device isolation -[2023-09-06 16:50:04,996][pytorch][INFO] - + Disabling gradients -[2023-09-06 16:50:04,996][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-06 16:50:05,621][pytorch][INFO] - + Turning on eval mode -[2023-09-06 16:50:05,621][inference][INFO] - Running inference benchmark -[2023-09-06 16:50:05,740][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-06 16:50:05,741][inference][INFO] - + Tracking forward pass peak 
memory -[2023-09-06 16:50:05,802][inference][INFO] - + Forward pass peak memory: 466.751488 (MB) -[2023-09-06 16:50:05,803][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-06 16:50:05,805][inference][INFO] - + Warming up the forward pass -[2023-09-06 16:50:05,843][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-06 16:50:10,892][inference][INFO] - + Forward pass latency: 3.20e-03 (s) -[2023-09-06 16:50:10,893][inference][INFO] - + Forward pass throughput: 312.00 (samples/s) -[2023-09-06 16:50:10,893][inference][INFO] - Saving inference results -[2023-09-06 16:50:10,904][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-06_14:49:04_300d6a4a62aac89b3f439110561d5a2268ffad9e/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-06_14:49:04_300d6a4a62aac89b3f439110561d5a2268ffad9e/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 344ec2f935a31a19a62e706f12f4bb8e09c4c386..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_14:49:04_300d6a4a62aac89b3f439110561d5a2268ffad9e/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-06_14:49:04_300d6a4a62aac89b3f439110561d5a2268ffad9e/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-06_14:49:04_300d6a4a62aac89b3f439110561d5a2268ffad9e/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 317c4d65ec278d09f1fba729338daeb03d47fe34..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_14:49:04_300d6a4a62aac89b3f439110561d5a2268ffad9e/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-06_14:49:04_300d6a4a62aac89b3f439110561d5a2268ffad9e/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-06_14:49:04_300d6a4a62aac89b3f439110561d5a2268ffad9e/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-09-06_14:49:04_300d6a4a62aac89b3f439110561d5a2268ffad9e/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_14:49:04_300d6a4a62aac89b3f439110561d5a2268ffad9e/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-06_14:49:04_300d6a4a62aac89b3f439110561d5a2268ffad9e/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-06_14:49:04_300d6a4a62aac89b3f439110561d5a2268ffad9e/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 9af0e66383a2862a3d036ea4917e3bacabb881ae..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_14:49:04_300d6a4a62aac89b3f439110561d5a2268ffad9e/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-06_14:49:04_300d6a4a62aac89b3f439110561d5a2268ffad9e/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-06_14:49:04_300d6a4a62aac89b3f439110561d5a2268ffad9e/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index d61f6c4749e1d6cf568740d957c633cdff29a13a..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_14:49:04_300d6a4a62aac89b3f439110561d5a2268ffad9e/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.68537599999996,0.0036,1110.0 diff --git a/raw_results/2023-09-06_14:49:04_300d6a4a62aac89b3f439110561d5a2268ffad9e/pytorch_bert_inference/1/main.log b/raw_results/2023-09-06_14:49:04_300d6a4a62aac89b3f439110561d5a2268ffad9e/pytorch_bert_inference/1/main.log deleted file mode 100644 index 8019eecb68d6bed6d752d0f77eabf9571532e175..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-09-06_14:49:04_300d6a4a62aac89b3f439110561d5a2268ffad9e/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-06 16:50:11,282][benchmark][INFO] - Configuring inference benchmark -[2023-09-06 16:50:11,283][benchmark][INFO] - + Setting seed(42) -[2023-09-06 16:50:11,728][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-06 16:50:11,728][backend][INFO] - Configuring pytorch backend -[2023-09-06 16:50:11,728][backend][INFO] - + Checking initial device isolation -[2023-09-06 16:50:11,728][backend][INFO] - + Checking contineous device isolation -[2023-09-06 16:50:11,728][pytorch][INFO] - + Disabling gradients -[2023-09-06 16:50:11,729][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-06 16:50:11,844][pytorch][INFO] - + Turning on eval mode -[2023-09-06 16:50:11,845][inference][INFO] - Running inference benchmark -[2023-09-06 16:50:11,971][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-06 16:50:11,973][inference][INFO] - + Tracking forward pass peak memory -[2023-09-06 16:50:12,019][inference][INFO] - + Forward pass peak memory: 467.68537599999996 (MB) -[2023-09-06 16:50:12,020][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-06 16:50:12,022][inference][INFO] - + Warming up the forward pass -[2023-09-06 16:50:12,074][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-06 16:50:17,118][inference][INFO] - + Forward pass latency: 3.60e-03 (s) -[2023-09-06 16:50:17,119][inference][INFO] - + Forward pass throughput: 1110.00 (samples/s) -[2023-09-06 16:50:17,119][inference][INFO] - Saving inference results -[2023-09-06 16:50:17,128][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-06_14:49:04_300d6a4a62aac89b3f439110561d5a2268ffad9e/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-06_14:49:04_300d6a4a62aac89b3f439110561d5a2268ffad9e/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 7e32a35f367f532a16d65c94e459c4f30c9188c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_14:49:04_300d6a4a62aac89b3f439110561d5a2268ffad9e/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference 
-model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-06_14:49:04_300d6a4a62aac89b3f439110561d5a2268ffad9e/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-06_14:49:04_300d6a4a62aac89b3f439110561d5a2268ffad9e/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index e1c1e445c4d87753ca6102f5e4d77a9a9384e61c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_14:49:04_300d6a4a62aac89b3f439110561d5a2268ffad9e/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-06_14:49:04_300d6a4a62aac89b3f439110561d5a2268ffad9e/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-06_14:49:04_300d6a4a62aac89b3f439110561d5a2268ffad9e/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-09-06_14:49:04_300d6a4a62aac89b3f439110561d5a2268ffad9e/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_14:49:04_300d6a4a62aac89b3f439110561d5a2268ffad9e/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-06_14:49:04_300d6a4a62aac89b3f439110561d5a2268ffad9e/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-06_14:49:04_300d6a4a62aac89b3f439110561d5a2268ffad9e/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 5cf5f1ff54ffd6523449b409066f7697ce8772db..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_14:49:04_300d6a4a62aac89b3f439110561d5a2268ffad9e/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-06_14:49:04_300d6a4a62aac89b3f439110561d5a2268ffad9e/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-06_14:49:04_300d6a4a62aac89b3f439110561d5a2268ffad9e/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 245a43beb10e2a370abd22257862aec73f73b124..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_14:49:04_300d6a4a62aac89b3f439110561d5a2268ffad9e/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.16403199999996,0.00335,299.0,0.492,203.0 diff --git a/raw_results/2023-09-06_14:49:04_300d6a4a62aac89b3f439110561d5a2268ffad9e/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-06_14:49:04_300d6a4a62aac89b3f439110561d5a2268ffad9e/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 032e84834de6356ecfd3fbb41a3576223bb46629..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_14:49:04_300d6a4a62aac89b3f439110561d5a2268ffad9e/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-06 16:50:22,212][benchmark][INFO] - Configuring inference benchmark -[2023-09-06 16:50:22,212][benchmark][INFO] - + Setting seed(42) -[2023-09-06 16:50:23,943][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-06 16:50:23,943][backend][INFO] - Configuring pytorch backend -[2023-09-06 16:50:23,943][backend][INFO] - + Checking initial device isolation -[2023-09-06 16:50:23,943][backend][INFO] - + Checking contineous device isolation -[2023-09-06 16:50:23,944][pytorch][INFO] - + Disabling gradients -[2023-09-06 16:50:23,944][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-06 16:50:24,591][pytorch][INFO] - + Turning on eval mode -[2023-09-06 16:50:24,592][inference][INFO] - Running inference benchmark -[2023-09-06 16:50:24,835][inference][INFO] - + Tracking forward pass peak memory -[2023-09-06 16:50:24,883][inference][INFO] - + Forward pass peak memory: 469.16403199999996 (MB) -[2023-09-06 16:50:24,884][inference][INFO] - + Warming up the 
forward pass -[2023-09-06 16:50:24,917][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-06 16:50:29,967][inference][INFO] - + Forward pass latency: 3.35e-03 (s) -[2023-09-06 16:50:29,969][inference][INFO] - + Forward pass throughput: 299.00 (samples/s) -[2023-09-06 16:50:29,970][inference][INFO] - + Warming up the generation pass -[2023-09-06 16:50:30,475][inference][INFO] - + Tracking generation latency and throughput -[2023-09-06 16:50:35,883][inference][INFO] - + Generation pass latency: 4.92e-01 (s) -[2023-09-06 16:50:35,885][inference][INFO] - + Generation pass throughput: 203.00 (tokens/s) -[2023-09-06 16:50:35,885][inference][INFO] - Saving inference results -[2023-09-06 16:50:35,897][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-06_15:45:47_fa6107c97edf7cf725305a34735a57875b67d85e/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-06_15:45:47_fa6107c97edf7cf725305a34735a57875b67d85e/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index d75847bed7772db4647c7b58e851f1868b48e619..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_15:45:47_fa6107c97edf7cf725305a34735a57875b67d85e/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-06_15:45:47_fa6107c97edf7cf725305a34735a57875b67d85e/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-06_15:45:47_fa6107c97edf7cf725305a34735a57875b67d85e/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 5c6c0559392aa20ed3d7e1e0e5c7f603c7da7cbd..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_15:45:47_fa6107c97edf7cf725305a34735a57875b67d85e/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-06_15:45:47_fa6107c97edf7cf725305a34735a57875b67d85e/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-09-06_15:45:47_fa6107c97edf7cf725305a34735a57875b67d85e/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-06_15:45:47_fa6107c97edf7cf725305a34735a57875b67d85e/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_15:45:47_fa6107c97edf7cf725305a34735a57875b67d85e/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-06_15:45:47_fa6107c97edf7cf725305a34735a57875b67d85e/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-06_15:45:47_fa6107c97edf7cf725305a34735a57875b67d85e/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 36a53864c4e26f9e38905bad2498ae02dc1b767f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_15:45:47_fa6107c97edf7cf725305a34735a57875b67d85e/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-06_15:45:47_fa6107c97edf7cf725305a34735a57875b67d85e/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-06_15:45:47_fa6107c97edf7cf725305a34735a57875b67d85e/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 67d4412ce6dd4cfc573de24b7c0b8f14929959a1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_15:45:47_fa6107c97edf7cf725305a34735a57875b67d85e/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.501056,0.00316,316.0 diff --git a/raw_results/2023-09-06_15:45:47_fa6107c97edf7cf725305a34735a57875b67d85e/pytorch_bert_inference/0/main.log b/raw_results/2023-09-06_15:45:47_fa6107c97edf7cf725305a34735a57875b67d85e/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
e8b42a921be35492c017f3bb397784a24d8e8d36..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_15:45:47_fa6107c97edf7cf725305a34735a57875b67d85e/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-06 16:51:46,677][benchmark][INFO] - Configuring inference benchmark -[2023-09-06 16:51:46,678][benchmark][INFO] - + Setting seed(42) -[2023-09-06 16:51:48,001][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-06 16:51:48,002][backend][INFO] - Configuring pytorch backend -[2023-09-06 16:51:48,002][backend][INFO] - + Checking initial device isolation -[2023-09-06 16:51:48,002][backend][INFO] - + Checking contineous device isolation -[2023-09-06 16:51:48,002][pytorch][INFO] - + Disabling gradients -[2023-09-06 16:51:48,002][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-06 16:51:48,621][pytorch][INFO] - + Turning on eval mode -[2023-09-06 16:51:48,621][inference][INFO] - Running inference benchmark -[2023-09-06 16:51:48,743][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-06 16:51:48,745][inference][INFO] - + Tracking forward pass peak memory -[2023-09-06 16:51:48,810][inference][INFO] - + Forward pass peak memory: 467.501056 (MB) -[2023-09-06 16:51:48,811][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-06 16:51:48,813][inference][INFO] - + Warming up the forward pass -[2023-09-06 16:51:48,848][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-06 16:51:53,902][inference][INFO] - + Forward pass latency: 3.16e-03 (s) -[2023-09-06 16:51:53,904][inference][INFO] - + Forward pass throughput: 316.00 (samples/s) -[2023-09-06 16:51:53,904][inference][INFO] - Saving inference results -[2023-09-06 16:51:53,916][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-06_15:45:47_fa6107c97edf7cf725305a34735a57875b67d85e/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-06_15:45:47_fa6107c97edf7cf725305a34735a57875b67d85e/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 344ec2f935a31a19a62e706f12f4bb8e09c4c386..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_15:45:47_fa6107c97edf7cf725305a34735a57875b67d85e/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
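As a sanity check on the deleted results above: the throughput columns in these inference_results.csv files look derivable from the latency columns, with forward throughput as batch_size / forward latency and generation throughput as new_tokens / generation latency. A minimal check in Python against the pytorch_gpt2_inference/0 row (this is an observation from the numbers themselves, not documented optimum_benchmark behavior):

# Values copied from the deleted gpt2 CSV above (batch_size=1, new_tokens=100).
batch_size, new_tokens = 1, 100
forward_latency_s = 0.00335
generate_latency_s = 0.492
print(round(batch_size / forward_latency_s))   # 299 -> matches forward.throughput(samples/s)
print(round(new_tokens / generate_latency_s))  # 203 -> matches generate.throughput(tokens/s)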
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-06_15:45:47_fa6107c97edf7cf725305a34735a57875b67d85e/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-06_15:45:47_fa6107c97edf7cf725305a34735a57875b67d85e/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index a5eb705e37e23ddb030e06c904d2c6a6c5ceccda..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_15:45:47_fa6107c97edf7cf725305a34735a57875b67d85e/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-06_15:45:47_fa6107c97edf7cf725305a34735a57875b67d85e/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-06_15:45:47_fa6107c97edf7cf725305a34735a57875b67d85e/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-09-06_15:45:47_fa6107c97edf7cf725305a34735a57875b67d85e/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_15:45:47_fa6107c97edf7cf725305a34735a57875b67d85e/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-06_15:45:47_fa6107c97edf7cf725305a34735a57875b67d85e/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-06_15:45:47_fa6107c97edf7cf725305a34735a57875b67d85e/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 9af0e66383a2862a3d036ea4917e3bacabb881ae..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_15:45:47_fa6107c97edf7cf725305a34735a57875b67d85e/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-06_15:45:47_fa6107c97edf7cf725305a34735a57875b67d85e/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-06_15:45:47_fa6107c97edf7cf725305a34735a57875b67d85e/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index b13898b4fcb7247eb4ac594d28719730a0ed7768..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_15:45:47_fa6107c97edf7cf725305a34735a57875b67d85e/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.53324799999996,0.00355,1130.0 diff --git a/raw_results/2023-09-06_15:45:47_fa6107c97edf7cf725305a34735a57875b67d85e/pytorch_bert_inference/1/main.log b/raw_results/2023-09-06_15:45:47_fa6107c97edf7cf725305a34735a57875b67d85e/pytorch_bert_inference/1/main.log deleted file mode 100644 index 6ac6cec50229079a46c0969919c5ce917e0d0687..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_15:45:47_fa6107c97edf7cf725305a34735a57875b67d85e/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-06 16:51:54,288][benchmark][INFO] - Configuring inference benchmark -[2023-09-06 16:51:54,290][benchmark][INFO] - + Setting seed(42) -[2023-09-06 16:51:54,714][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-06 16:51:54,714][backend][INFO] - Configuring pytorch backend -[2023-09-06 16:51:54,714][backend][INFO] - + Checking initial device isolation -[2023-09-06 16:51:54,714][backend][INFO] - + Checking contineous device isolation -[2023-09-06 16:51:54,715][pytorch][INFO] - + Disabling gradients -[2023-09-06 16:51:54,715][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-06 16:51:54,829][pytorch][INFO] - + Turning on eval mode -[2023-09-06 16:51:54,829][inference][INFO] - Running inference benchmark -[2023-09-06 16:51:54,950][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-06 16:51:54,952][inference][INFO] - + Tracking forward 
pass peak memory -[2023-09-06 16:51:55,000][inference][INFO] - + Forward pass peak memory: 468.53324799999996 (MB) -[2023-09-06 16:51:55,001][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-06 16:51:55,002][inference][INFO] - + Warming up the forward pass -[2023-09-06 16:51:55,039][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-06 16:52:00,084][inference][INFO] - + Forward pass latency: 3.55e-03 (s) -[2023-09-06 16:52:00,085][inference][INFO] - + Forward pass throughput: 1130.00 (samples/s) -[2023-09-06 16:52:00,085][inference][INFO] - Saving inference results -[2023-09-06 16:52:00,093][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-06_15:45:47_fa6107c97edf7cf725305a34735a57875b67d85e/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-06_15:45:47_fa6107c97edf7cf725305a34735a57875b67d85e/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 7e32a35f367f532a16d65c94e459c4f30c9188c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_15:45:47_fa6107c97edf7cf725305a34735a57875b67d85e/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-06_15:45:47_fa6107c97edf7cf725305a34735a57875b67d85e/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-06_15:45:47_fa6107c97edf7cf725305a34735a57875b67d85e/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 9309a0ca18f8f3f5a51d16df3905f609561bed99..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_15:45:47_fa6107c97edf7cf725305a34735a57875b67d85e/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - 
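The hydra run/sweep directories in these configs explain the raw_results layout used throughout this diff: each job lands in sweeps/<commit_date>_<sha>/<experiment_name>/<job.num>, where job.num enumerates the swept batch sizes (0 for batch_size=1, 1 for batch_size=4). A sketch of the path template, reconstructed from the deleted configs rather than taken from any optimum_benchmark API:

# Mirrors hydra.sweep.dir + subdir from the configs above (editorial reconstruction).
date_sha = "2023-09-06_15:45:47_fa6107c97edf7cf725305a34735a57875b67d85e"  # commit dir from this diff
experiment, job_num = "pytorch_bert_inference", 1  # job 1 corresponds to batch_size=4
print(f"sweeps/{date_sha}/{experiment}/{job_num}")  # matches the output_dir recorded above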
launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-06_15:45:47_fa6107c97edf7cf725305a34735a57875b67d85e/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-06_15:45:47_fa6107c97edf7cf725305a34735a57875b67d85e/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-09-06_15:45:47_fa6107c97edf7cf725305a34735a57875b67d85e/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_15:45:47_fa6107c97edf7cf725305a34735a57875b67d85e/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-06_15:45:47_fa6107c97edf7cf725305a34735a57875b67d85e/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-06_15:45:47_fa6107c97edf7cf725305a34735a57875b67d85e/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 5cf5f1ff54ffd6523449b409066f7697ce8772db..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_15:45:47_fa6107c97edf7cf725305a34735a57875b67d85e/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-06_15:45:47_fa6107c97edf7cf725305a34735a57875b67d85e/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-06_15:45:47_fa6107c97edf7cf725305a34735a57875b67d85e/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index e96156647f298e9eb6403aa7802d8ee755b98d85..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-06_15:45:47_fa6107c97edf7cf725305a34735a57875b67d85e/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.23776,0.00318,314.0,0.49,204.0 diff --git a/raw_results/2023-09-06_15:45:47_fa6107c97edf7cf725305a34735a57875b67d85e/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-06_15:45:47_fa6107c97edf7cf725305a34735a57875b67d85e/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index d07e18a793c6937ec9e3dc65d3d90268ee60e6a7..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-09-06_15:45:47_fa6107c97edf7cf725305a34735a57875b67d85e/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-06 16:52:05,108][benchmark][INFO] - Configuring inference benchmark -[2023-09-06 16:52:05,109][benchmark][INFO] - + Setting seed(42) -[2023-09-06 16:52:06,548][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-06 16:52:06,549][backend][INFO] - Configuring pytorch backend -[2023-09-06 16:52:06,549][backend][INFO] - + Checking initial device isolation -[2023-09-06 16:52:06,549][backend][INFO] - + Checking contineous device isolation -[2023-09-06 16:52:06,549][pytorch][INFO] - + Disabling gradients -[2023-09-06 16:52:06,550][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-06 16:52:07,203][pytorch][INFO] - + Turning on eval mode -[2023-09-06 16:52:07,203][inference][INFO] - Running inference benchmark -[2023-09-06 16:52:07,392][inference][INFO] - + Tracking forward pass peak memory -[2023-09-06 16:52:07,443][inference][INFO] - + Forward pass peak memory: 469.23776 (MB) -[2023-09-06 16:52:07,445][inference][INFO] - + Warming up the forward pass -[2023-09-06 16:52:07,482][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-06 16:52:12,533][inference][INFO] - + Forward pass latency: 3.18e-03 (s) -[2023-09-06 16:52:12,535][inference][INFO] - + Forward pass throughput: 314.00 (samples/s) -[2023-09-06 16:52:12,535][inference][INFO] - + Warming up the generation pass -[2023-09-06 16:52:13,031][inference][INFO] - + Tracking generation latency and throughput -[2023-09-06 16:52:18,421][inference][INFO] - + Generation pass latency: 4.90e-01 (s) -[2023-09-06 16:52:18,423][inference][INFO] - + Generation pass throughput: 204.00 (tokens/s) -[2023-09-06 16:52:18,423][inference][INFO] - Saving inference results -[2023-09-06 16:52:18,434][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-07_04:22:53_e3a9716384146b89f21a39bdf13dd4b1cac740bb/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-07_04:22:53_e3a9716384146b89f21a39bdf13dd4b1cac740bb/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index d75847bed7772db4647c7b58e851f1868b48e619..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_04:22:53_e3a9716384146b89f21a39bdf13dd4b1cac740bb/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 
16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-07_04:22:53_e3a9716384146b89f21a39bdf13dd4b1cac740bb/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-07_04:22:53_e3a9716384146b89f21a39bdf13dd4b1cac740bb/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 76b0e8645dbf7594d1bc9703f83be32a54c3fbb5..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_04:22:53_e3a9716384146b89f21a39bdf13dd4b1cac740bb/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
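Note the difference between the two YAML snapshots kept per run: .config/config.yaml stores disable_grad and eval_mode as the unresolved interpolation ${is_inference:${benchmark.name}}, while hydra_config.yaml stores the resolved value true. A minimal sketch of that resolution with OmegaConf; the is_inference resolver is assumed here, since its real definition lives in optimum_benchmark and is not part of this diff:

from omegaconf import OmegaConf

# Hypothetical stand-in for optimum_benchmark's resolver: true iff the benchmark is "inference".
OmegaConf.register_new_resolver("is_inference", lambda name: name == "inference")
cfg = OmegaConf.create({
    "benchmark": {"name": "inference"},
    "disable_grad": "${is_inference:${benchmark.name}}",
})
print(cfg.disable_grad)  # True -- the value hydra_config.yaml records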
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-07_04:22:53_e3a9716384146b89f21a39bdf13dd4b1cac740bb/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-07_04:22:53_e3a9716384146b89f21a39bdf13dd4b1cac740bb/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-07_04:22:53_e3a9716384146b89f21a39bdf13dd4b1cac740bb/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_04:22:53_e3a9716384146b89f21a39bdf13dd4b1cac740bb/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-07_04:22:53_e3a9716384146b89f21a39bdf13dd4b1cac740bb/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-07_04:22:53_e3a9716384146b89f21a39bdf13dd4b1cac740bb/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 36a53864c4e26f9e38905bad2498ae02dc1b767f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_04:22:53_e3a9716384146b89f21a39bdf13dd4b1cac740bb/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-07_04:22:53_e3a9716384146b89f21a39bdf13dd4b1cac740bb/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-07_04:22:53_e3a9716384146b89f21a39bdf13dd4b1cac740bb/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 634af756e4e7a73486a5868447c2b8de6aaf1d13..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_04:22:53_e3a9716384146b89f21a39bdf13dd4b1cac740bb/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.43199999999996,0.00318,314.0 diff --git a/raw_results/2023-09-07_04:22:53_e3a9716384146b89f21a39bdf13dd4b1cac740bb/pytorch_bert_inference/0/main.log b/raw_results/2023-09-07_04:22:53_e3a9716384146b89f21a39bdf13dd4b1cac740bb/pytorch_bert_inference/0/main.log deleted file mode 100644 index f375a5796e6905c544c9ab4f40695d89f2c7dd30..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_04:22:53_e3a9716384146b89f21a39bdf13dd4b1cac740bb/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-07 04:49:47,138][benchmark][INFO] - Configuring inference benchmark -[2023-09-07 04:49:47,139][benchmark][INFO] - + Setting seed(42) -[2023-09-07 04:49:48,437][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-07 04:49:48,437][backend][INFO] - Configuring pytorch backend -[2023-09-07 04:49:48,438][backend][INFO] - + Checking initial device isolation -[2023-09-07 04:49:48,438][backend][INFO] - + Checking contineous device isolation -[2023-09-07 04:49:48,438][pytorch][INFO] - + Disabling gradients -[2023-09-07 04:49:48,438][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-07 04:49:49,092][pytorch][INFO] - + Turning on eval mode -[2023-09-07 04:49:49,093][inference][INFO] - Running inference benchmark -[2023-09-07 04:49:49,218][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-07 04:49:49,219][inference][INFO] - + Tracking forward 
pass peak memory -[2023-09-07 04:49:49,280][inference][INFO] - + Forward pass peak memory: 466.43199999999996 (MB) -[2023-09-07 04:49:49,281][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-07 04:49:49,282][inference][INFO] - + Warming up the forward pass -[2023-09-07 04:49:49,315][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-07 04:49:54,365][inference][INFO] - + Forward pass latency: 3.18e-03 (s) -[2023-09-07 04:49:54,366][inference][INFO] - + Forward pass throughput: 314.00 (samples/s) -[2023-09-07 04:49:54,366][inference][INFO] - Saving inference results -[2023-09-07 04:49:54,377][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-07_04:22:53_e3a9716384146b89f21a39bdf13dd4b1cac740bb/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-07_04:22:53_e3a9716384146b89f21a39bdf13dd4b1cac740bb/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 344ec2f935a31a19a62e706f12f4bb8e09c4c386..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_04:22:53_e3a9716384146b89f21a39bdf13dd4b1cac740bb/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-07_04:22:53_e3a9716384146b89f21a39bdf13dd4b1cac740bb/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-07_04:22:53_e3a9716384146b89f21a39bdf13dd4b1cac740bb/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 41e48d3b7e6c0bcb9c36dc3140ec7bab5e364a27..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_04:22:53_e3a9716384146b89f21a39bdf13dd4b1cac740bb/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} 
- launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-07_04:22:53_e3a9716384146b89f21a39bdf13dd4b1cac740bb/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-09-07_04:22:53_e3a9716384146b89f21a39bdf13dd4b1cac740bb/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-09-07_04:22:53_e3a9716384146b89f21a39bdf13dd4b1cac740bb/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_04:22:53_e3a9716384146b89f21a39bdf13dd4b1cac740bb/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-07_04:22:53_e3a9716384146b89f21a39bdf13dd4b1cac740bb/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-07_04:22:53_e3a9716384146b89f21a39bdf13dd4b1cac740bb/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 9af0e66383a2862a3d036ea4917e3bacabb881ae..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_04:22:53_e3a9716384146b89f21a39bdf13dd4b1cac740bb/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-07_04:22:53_e3a9716384146b89f21a39bdf13dd4b1cac740bb/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-07_04:22:53_e3a9716384146b89f21a39bdf13dd4b1cac740bb/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index ba9d7e645f889cb17e96aa1b6d1e2d75a891e426..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_04:22:53_e3a9716384146b89f21a39bdf13dd4b1cac740bb/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.45599999999996,0.00352,1140.0 diff --git a/raw_results/2023-09-07_04:22:53_e3a9716384146b89f21a39bdf13dd4b1cac740bb/pytorch_bert_inference/1/main.log b/raw_results/2023-09-07_04:22:53_e3a9716384146b89f21a39bdf13dd4b1cac740bb/pytorch_bert_inference/1/main.log deleted file mode 100644 index 
d89297b5c3b531f87cf35e5acdfe062142022c74..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_04:22:53_e3a9716384146b89f21a39bdf13dd4b1cac740bb/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-07 04:49:54,763][benchmark][INFO] - Configuring inference benchmark -[2023-09-07 04:49:54,764][benchmark][INFO] - + Setting seed(42) -[2023-09-07 04:49:55,238][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-07 04:49:55,238][backend][INFO] - Configuring pytorch backend -[2023-09-07 04:49:55,238][backend][INFO] - + Checking initial device isolation -[2023-09-07 04:49:55,238][backend][INFO] - + Checking contineous device isolation -[2023-09-07 04:49:55,238][pytorch][INFO] - + Disabling gradients -[2023-09-07 04:49:55,239][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-07 04:49:55,358][pytorch][INFO] - + Turning on eval mode -[2023-09-07 04:49:55,359][inference][INFO] - Running inference benchmark -[2023-09-07 04:49:55,483][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-07 04:49:55,485][inference][INFO] - + Tracking forward pass peak memory -[2023-09-07 04:49:55,528][inference][INFO] - + Forward pass peak memory: 467.45599999999996 (MB) -[2023-09-07 04:49:55,529][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-07 04:49:55,530][inference][INFO] - + Warming up the forward pass -[2023-09-07 04:49:55,567][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-07 04:50:00,613][inference][INFO] - + Forward pass latency: 3.52e-03 (s) -[2023-09-07 04:50:00,614][inference][INFO] - + Forward pass throughput: 1140.00 (samples/s) -[2023-09-07 04:50:00,614][inference][INFO] - Saving inference results -[2023-09-07 04:50:00,621][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-07_04:22:53_e3a9716384146b89f21a39bdf13dd4b1cac740bb/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-07_04:22:53_e3a9716384146b89f21a39bdf13dd4b1cac740bb/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 7e32a35f367f532a16d65c94e459c4f30c9188c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_04:22:53_e3a9716384146b89f21a39bdf13dd4b1cac740bb/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
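Across the two bert rows from commit 2023-09-07 above, moving from batch_size 1 to 4 raises forward latency only from 3.18e-03 s to 3.52e-03 s while throughput grows from 314 to 1140 samples/s, i.e., close to linear scaling, which is plausible for a tiny model on a 96-core CPU. The reported throughputs agree with batch_size / latency up to rounding:

lat_bs1, lat_bs4 = 0.00318, 0.00352  # from the deleted CSVs above
print(1 / lat_bs1)  # ~314.5 samples/s; the CSV reports 314.0
print(4 / lat_bs4)  # ~1136.4 samples/s; the CSV reports 1140.0 (three significant figures)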
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-07_04:22:53_e3a9716384146b89f21a39bdf13dd4b1cac740bb/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-07_04:22:53_e3a9716384146b89f21a39bdf13dd4b1cac740bb/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 87fc79b7fca64e146f4eaf58ca28586921014c56..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_04:22:53_e3a9716384146b89f21a39bdf13dd4b1cac740bb/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-07_04:22:53_e3a9716384146b89f21a39bdf13dd4b1cac740bb/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-07_04:22:53_e3a9716384146b89f21a39bdf13dd4b1cac740bb/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-09-07_04:22:53_e3a9716384146b89f21a39bdf13dd4b1cac740bb/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_04:22:53_e3a9716384146b89f21a39bdf13dd4b1cac740bb/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-07_04:22:53_e3a9716384146b89f21a39bdf13dd4b1cac740bb/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-07_04:22:53_e3a9716384146b89f21a39bdf13dd4b1cac740bb/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 5cf5f1ff54ffd6523449b409066f7697ce8772db..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_04:22:53_e3a9716384146b89f21a39bdf13dd4b1cac740bb/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-07_04:22:53_e3a9716384146b89f21a39bdf13dd4b1cac740bb/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-07_04:22:53_e3a9716384146b89f21a39bdf13dd4b1cac740bb/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 6d7da48b3a8e62f07ab19b3ac4180fcacb76afec..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_04:22:53_e3a9716384146b89f21a39bdf13dd4b1cac740bb/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.303296,0.00312,321.0,0.497,201.0 diff --git a/raw_results/2023-09-07_04:22:53_e3a9716384146b89f21a39bdf13dd4b1cac740bb/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-07_04:22:53_e3a9716384146b89f21a39bdf13dd4b1cac740bb/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 69c8079d840418a8e39765a6f400ac7b9431149d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_04:22:53_e3a9716384146b89f21a39bdf13dd4b1cac740bb/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-07 04:50:05,538][benchmark][INFO] - Configuring inference benchmark -[2023-09-07 04:50:05,540][benchmark][INFO] - + Setting seed(42) -[2023-09-07 04:50:07,079][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-07 04:50:07,079][backend][INFO] - Configuring pytorch backend -[2023-09-07 04:50:07,079][backend][INFO] - + Checking initial device isolation -[2023-09-07 04:50:07,079][backend][INFO] - + Checking contineous device isolation -[2023-09-07 04:50:07,079][pytorch][INFO] - + Disabling gradients -[2023-09-07 04:50:07,080][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-07 04:50:08,420][pytorch][INFO] - + Turning on eval mode -[2023-09-07 04:50:08,421][inference][INFO] - Running inference benchmark -[2023-09-07 04:50:08,712][inference][INFO] - + Tracking forward pass peak memory -[2023-09-07 04:50:08,772][inference][INFO] - + Forward pass peak memory: 469.303296 (MB) -[2023-09-07 04:50:08,773][inference][INFO] - + Warming up the forward pass 
-[2023-09-07 04:50:08,807][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-07 04:50:13,859][inference][INFO] - + Forward pass latency: 3.12e-03 (s) -[2023-09-07 04:50:13,860][inference][INFO] - + Forward pass throughput: 321.00 (samples/s) -[2023-09-07 04:50:13,861][inference][INFO] - + Warming up the generation pass -[2023-09-07 04:50:14,356][inference][INFO] - + Tracking generation latency and throughput -[2023-09-07 04:50:19,828][inference][INFO] - + Generation pass latency: 4.97e-01 (s) -[2023-09-07 04:50:19,829][inference][INFO] - + Generation pass throughput: 201.00 (tokens/s) -[2023-09-07 04:50:19,830][inference][INFO] - Saving inference results -[2023-09-07 04:50:19,841][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-07_09:10:40_df04959e5542d41b269f96305d82c62287350cee/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-07_09:10:40_df04959e5542d41b269f96305d82c62287350cee/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index d75847bed7772db4647c7b58e851f1868b48e619..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_09:10:40_df04959e5542d41b269f96305d82c62287350cee/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-07_09:10:40_df04959e5542d41b269f96305d82c62287350cee/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-07_09:10:40_df04959e5542d41b269f96305d82c62287350cee/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index aa9eb40cc37b36384bbf0b9204e563b3ee0e5c3e..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_09:10:40_df04959e5542d41b269f96305d82c62287350cee/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-07_09:10:40_df04959e5542d41b269f96305d82c62287350cee/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
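The throughput columns in the inference_results.csv files deleted above follow directly from the measured latencies and the input shapes in the matching hydra_config.yaml: forward throughput is batch_size divided by mean forward latency, and generation throughput is batch_size * new_tokens divided by generation latency. Below is a minimal Python sketch that reproduces the pytorch_gpt2_inference/0 numbers recorded earlier in this diff (0.00312 s forward, 0.497 s generate, batch_size 1, new_tokens 100). The sig3 helper and the three-significant-figure rounding are assumptions inferred from the recorded values, not part of optimum-benchmark itself.

from math import floor, log10

def sig3(x: float) -> float:
    # Round to three significant figures, matching the rounding
    # apparent in the recorded CSV values (an inferred convention).
    return round(x, 2 - int(floor(log10(abs(x)))))

batch_size, new_tokens = 1, 100                       # from hydra_config.yaml above
forward_latency_s, generate_latency_s = 0.00312, 0.497  # from inference_results.csv above

print(sig3(batch_size / forward_latency_s))                # 321.0 samples/s, as recorded
print(sig3(batch_size * new_tokens / generate_latency_s))  # 201.0 tokens/s, as recorded
# The bert batch_size=4 row checks out the same way: 4 / 0.00352 -> 1140.0
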
a/raw_results/2023-09-07_09:10:40_df04959e5542d41b269f96305d82c62287350cee/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-07_09:10:40_df04959e5542d41b269f96305d82c62287350cee/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_09:10:40_df04959e5542d41b269f96305d82c62287350cee/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-07_09:10:40_df04959e5542d41b269f96305d82c62287350cee/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-07_09:10:40_df04959e5542d41b269f96305d82c62287350cee/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 36a53864c4e26f9e38905bad2498ae02dc1b767f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_09:10:40_df04959e5542d41b269f96305d82c62287350cee/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-07_09:10:40_df04959e5542d41b269f96305d82c62287350cee/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-07_09:10:40_df04959e5542d41b269f96305d82c62287350cee/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index c8e4c7c585553c41da8524144ca676a3ac85920c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_09:10:40_df04959e5542d41b269f96305d82c62287350cee/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.095552,0.00315,317.0 diff --git a/raw_results/2023-09-07_09:10:40_df04959e5542d41b269f96305d82c62287350cee/pytorch_bert_inference/0/main.log b/raw_results/2023-09-07_09:10:40_df04959e5542d41b269f96305d82c62287350cee/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
08b42eaf0abbaa45cfff409a7bc3937042c02de4..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_09:10:40_df04959e5542d41b269f96305d82c62287350cee/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-11 10:50:15,370][benchmark][INFO] - Configuring inference benchmark -[2023-09-11 10:50:15,372][benchmark][INFO] - + Setting seed(42) -[2023-09-11 10:50:16,533][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-11 10:50:16,534][backend][INFO] - Configuring pytorch backend -[2023-09-11 10:50:16,534][backend][INFO] - + Checking initial device isolation -[2023-09-11 10:50:16,534][backend][INFO] - + Checking contineous device isolation -[2023-09-11 10:50:16,534][pytorch][INFO] - + Disabling gradients -[2023-09-11 10:50:16,534][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-11 10:50:17,136][pytorch][INFO] - + Turning on eval mode -[2023-09-11 10:50:17,137][inference][INFO] - Running inference benchmark -[2023-09-11 10:50:17,251][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-11 10:50:17,253][inference][INFO] - + Tracking forward pass peak memory -[2023-09-11 10:50:17,315][inference][INFO] - + Forward pass peak memory: 467.095552 (MB) -[2023-09-11 10:50:17,317][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-11 10:50:17,319][inference][INFO] - + Warming up the forward pass -[2023-09-11 10:50:17,355][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-11 10:50:22,408][inference][INFO] - + Forward pass latency: 3.15e-03 (s) -[2023-09-11 10:50:22,409][inference][INFO] - + Forward pass throughput: 317.00 (samples/s) -[2023-09-11 10:50:22,409][inference][INFO] - Saving inference results -[2023-09-11 10:50:22,420][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-07_09:10:40_df04959e5542d41b269f96305d82c62287350cee/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-07_09:10:40_df04959e5542d41b269f96305d82c62287350cee/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 344ec2f935a31a19a62e706f12f4bb8e09c4c386..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_09:10:40_df04959e5542d41b269f96305d82c62287350cee/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-07_09:10:40_df04959e5542d41b269f96305d82c62287350cee/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-07_09:10:40_df04959e5542d41b269f96305d82c62287350cee/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index db60a749d73128eddf23eba53ed6482743b46336..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_09:10:40_df04959e5542d41b269f96305d82c62287350cee/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-07_09:10:40_df04959e5542d41b269f96305d82c62287350cee/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-07_09:10:40_df04959e5542d41b269f96305d82c62287350cee/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-09-07_09:10:40_df04959e5542d41b269f96305d82c62287350cee/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_09:10:40_df04959e5542d41b269f96305d82c62287350cee/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-07_09:10:40_df04959e5542d41b269f96305d82c62287350cee/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-07_09:10:40_df04959e5542d41b269f96305d82c62287350cee/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 9af0e66383a2862a3d036ea4917e3bacabb881ae..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_09:10:40_df04959e5542d41b269f96305d82c62287350cee/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-07_09:10:40_df04959e5542d41b269f96305d82c62287350cee/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-07_09:10:40_df04959e5542d41b269f96305d82c62287350cee/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index ba88022bce2822594395e2e1a2e8d95d2f430cf7..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_09:10:40_df04959e5542d41b269f96305d82c62287350cee/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.14003199999996,0.00339,1180.0 diff --git a/raw_results/2023-09-07_09:10:40_df04959e5542d41b269f96305d82c62287350cee/pytorch_bert_inference/1/main.log b/raw_results/2023-09-07_09:10:40_df04959e5542d41b269f96305d82c62287350cee/pytorch_bert_inference/1/main.log deleted file mode 100644 index ea35d0e1928f3e90bb9204be91b92c9a83ce2efe..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_09:10:40_df04959e5542d41b269f96305d82c62287350cee/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-11 10:50:22,794][benchmark][INFO] - Configuring inference benchmark -[2023-09-11 10:50:22,796][benchmark][INFO] - + Setting seed(42) -[2023-09-11 10:50:23,213][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-11 10:50:23,213][backend][INFO] - Configuring pytorch backend -[2023-09-11 10:50:23,213][backend][INFO] - + Checking initial device isolation -[2023-09-11 10:50:23,214][backend][INFO] - + Checking contineous device isolation -[2023-09-11 10:50:23,214][pytorch][INFO] - + Disabling gradients -[2023-09-11 10:50:23,214][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-11 10:50:23,329][pytorch][INFO] - + Turning on eval mode -[2023-09-11 10:50:23,329][inference][INFO] - Running inference benchmark -[2023-09-11 10:50:23,449][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-11 10:50:23,450][inference][INFO] - + Tracking forward 
pass peak memory -[2023-09-11 10:50:23,494][inference][INFO] - + Forward pass peak memory: 468.14003199999996 (MB) -[2023-09-11 10:50:23,495][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-11 10:50:23,496][inference][INFO] - + Warming up the forward pass -[2023-09-11 10:50:23,532][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-11 10:50:28,578][inference][INFO] - + Forward pass latency: 3.39e-03 (s) -[2023-09-11 10:50:28,580][inference][INFO] - + Forward pass throughput: 1180.00 (samples/s) -[2023-09-11 10:50:28,580][inference][INFO] - Saving inference results -[2023-09-11 10:50:28,588][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-07_09:10:40_df04959e5542d41b269f96305d82c62287350cee/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-07_09:10:40_df04959e5542d41b269f96305d82c62287350cee/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 7e32a35f367f532a16d65c94e459c4f30c9188c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_09:10:40_df04959e5542d41b269f96305d82c62287350cee/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-07_09:10:40_df04959e5542d41b269f96305d82c62287350cee/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-07_09:10:40_df04959e5542d41b269f96305d82c62287350cee/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index b919c55fa574ab650a386ef9f84c2c981e028b0a..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_09:10:40_df04959e5542d41b269f96305d82c62287350cee/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - 
launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-07_09:10:40_df04959e5542d41b269f96305d82c62287350cee/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-07_09:10:40_df04959e5542d41b269f96305d82c62287350cee/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-09-07_09:10:40_df04959e5542d41b269f96305d82c62287350cee/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_09:10:40_df04959e5542d41b269f96305d82c62287350cee/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-07_09:10:40_df04959e5542d41b269f96305d82c62287350cee/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-07_09:10:40_df04959e5542d41b269f96305d82c62287350cee/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 5cf5f1ff54ffd6523449b409066f7697ce8772db..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_09:10:40_df04959e5542d41b269f96305d82c62287350cee/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-07_09:10:40_df04959e5542d41b269f96305d82c62287350cee/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-07_09:10:40_df04959e5542d41b269f96305d82c62287350cee/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index b00c7f16cecba7baa6aeee3d6d789aa039c86900..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_09:10:40_df04959e5542d41b269f96305d82c62287350cee/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.13536,0.00356,281.0,0.479,209.0 diff --git a/raw_results/2023-09-07_09:10:40_df04959e5542d41b269f96305d82c62287350cee/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-07_09:10:40_df04959e5542d41b269f96305d82c62287350cee/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index c99bc9671ffbec03c796cbc997192174e57c5355..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-09-07_09:10:40_df04959e5542d41b269f96305d82c62287350cee/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-11 10:50:33,352][benchmark][INFO] - Configuring inference benchmark -[2023-09-11 10:50:33,354][benchmark][INFO] - + Setting seed(42) -[2023-09-11 10:50:34,740][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-11 10:50:34,740][backend][INFO] - Configuring pytorch backend -[2023-09-11 10:50:34,741][backend][INFO] - + Checking initial device isolation -[2023-09-11 10:50:34,741][backend][INFO] - + Checking contineous device isolation -[2023-09-11 10:50:34,741][pytorch][INFO] - + Disabling gradients -[2023-09-11 10:50:34,741][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-11 10:50:35,499][pytorch][INFO] - + Turning on eval mode -[2023-09-11 10:50:35,500][inference][INFO] - Running inference benchmark -[2023-09-11 10:50:35,691][inference][INFO] - + Tracking forward pass peak memory -[2023-09-11 10:50:35,742][inference][INFO] - + Forward pass peak memory: 469.13536 (MB) -[2023-09-11 10:50:35,743][inference][INFO] - + Warming up the forward pass -[2023-09-11 10:50:35,776][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-11 10:50:40,827][inference][INFO] - + Forward pass latency: 3.56e-03 (s) -[2023-09-11 10:50:40,829][inference][INFO] - + Forward pass throughput: 281.00 (samples/s) -[2023-09-11 10:50:40,829][inference][INFO] - + Warming up the generation pass -[2023-09-11 10:50:41,320][inference][INFO] - + Tracking generation latency and throughput -[2023-09-11 10:50:46,589][inference][INFO] - + Generation pass latency: 4.79e-01 (s) -[2023-09-11 10:50:46,590][inference][INFO] - + Generation pass throughput: 209.00 (tokens/s) -[2023-09-11 10:50:46,591][inference][INFO] - Saving inference results -[2023-09-11 10:50:46,602][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-07_12:51:35_0188739a74dca8a9cf3f646a9a417af7f136f1aa/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-07_12:51:35_0188739a74dca8a9cf3f646a9a417af7f136f1aa/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index d75847bed7772db4647c7b58e851f1868b48e619..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_12:51:35_0188739a74dca8a9cf3f646a9a417af7f136f1aa/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 
16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-07_12:51:35_0188739a74dca8a9cf3f646a9a417af7f136f1aa/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-07_12:51:35_0188739a74dca8a9cf3f646a9a417af7f136f1aa/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 89210d3dd9ff35507a0a9742827e7721099cfc2b..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_12:51:35_0188739a74dca8a9cf3f646a9a417af7f136f1aa/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-07_12:51:35_0188739a74dca8a9cf3f646a9a417af7f136f1aa/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-07_12:51:35_0188739a74dca8a9cf3f646a9a417af7f136f1aa/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-07_12:51:35_0188739a74dca8a9cf3f646a9a417af7f136f1aa/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_12:51:35_0188739a74dca8a9cf3f646a9a417af7f136f1aa/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-07_12:51:35_0188739a74dca8a9cf3f646a9a417af7f136f1aa/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-07_12:51:35_0188739a74dca8a9cf3f646a9a417af7f136f1aa/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 36a53864c4e26f9e38905bad2498ae02dc1b767f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_12:51:35_0188739a74dca8a9cf3f646a9a417af7f136f1aa/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-07_12:51:35_0188739a74dca8a9cf3f646a9a417af7f136f1aa/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-07_12:51:35_0188739a74dca8a9cf3f646a9a417af7f136f1aa/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 63befc69b05c014d03be52fc248845b2a54557ca..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_12:51:35_0188739a74dca8a9cf3f646a9a417af7f136f1aa/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.17337599999996,0.00316,316.0 diff --git a/raw_results/2023-09-07_12:51:35_0188739a74dca8a9cf3f646a9a417af7f136f1aa/pytorch_bert_inference/0/main.log b/raw_results/2023-09-07_12:51:35_0188739a74dca8a9cf3f646a9a417af7f136f1aa/pytorch_bert_inference/0/main.log deleted file mode 100644 index 8c4ac8d7c0aea19ecd91999dd2bc7a1a4fff0f54..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_12:51:35_0188739a74dca8a9cf3f646a9a417af7f136f1aa/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-11 14:50:16,705][benchmark][INFO] - Configuring inference benchmark -[2023-09-11 14:50:16,705][benchmark][INFO] - + Setting seed(42) -[2023-09-11 14:50:17,957][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-11 14:50:17,958][backend][INFO] - Configuring pytorch backend -[2023-09-11 14:50:17,958][backend][INFO] - + Checking initial device isolation -[2023-09-11 14:50:17,958][backend][INFO] - + Checking contineous device isolation -[2023-09-11 14:50:17,958][pytorch][INFO] - + Disabling gradients -[2023-09-11 14:50:17,958][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-11 14:50:18,560][pytorch][INFO] - + Turning on eval mode -[2023-09-11 14:50:18,561][inference][INFO] - Running inference benchmark -[2023-09-11 14:50:18,679][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-11 14:50:18,681][inference][INFO] - + Tracking forward 
pass peak memory -[2023-09-11 14:50:18,743][inference][INFO] - + Forward pass peak memory: 467.17337599999996 (MB) -[2023-09-11 14:50:18,744][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-11 14:50:18,746][inference][INFO] - + Warming up the forward pass -[2023-09-11 14:50:18,778][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-11 14:50:23,830][inference][INFO] - + Forward pass latency: 3.16e-03 (s) -[2023-09-11 14:50:23,831][inference][INFO] - + Forward pass throughput: 316.00 (samples/s) -[2023-09-11 14:50:23,832][inference][INFO] - Saving inference results -[2023-09-11 14:50:23,843][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-07_12:51:35_0188739a74dca8a9cf3f646a9a417af7f136f1aa/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-07_12:51:35_0188739a74dca8a9cf3f646a9a417af7f136f1aa/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 344ec2f935a31a19a62e706f12f4bb8e09c4c386..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_12:51:35_0188739a74dca8a9cf3f646a9a417af7f136f1aa/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-07_12:51:35_0188739a74dca8a9cf3f646a9a417af7f136f1aa/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-07_12:51:35_0188739a74dca8a9cf3f646a9a417af7f136f1aa/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index d8442248da5b7d25ed0bece70d4959c0a2707330..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_12:51:35_0188739a74dca8a9cf3f646a9a417af7f136f1aa/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} 
- launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-07_12:51:35_0188739a74dca8a9cf3f646a9a417af7f136f1aa/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-09-07_12:51:35_0188739a74dca8a9cf3f646a9a417af7f136f1aa/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-09-07_12:51:35_0188739a74dca8a9cf3f646a9a417af7f136f1aa/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_12:51:35_0188739a74dca8a9cf3f646a9a417af7f136f1aa/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-07_12:51:35_0188739a74dca8a9cf3f646a9a417af7f136f1aa/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-07_12:51:35_0188739a74dca8a9cf3f646a9a417af7f136f1aa/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 9af0e66383a2862a3d036ea4917e3bacabb881ae..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_12:51:35_0188739a74dca8a9cf3f646a9a417af7f136f1aa/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-07_12:51:35_0188739a74dca8a9cf3f646a9a417af7f136f1aa/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-07_12:51:35_0188739a74dca8a9cf3f646a9a417af7f136f1aa/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 7e151f0adf85e971a26f84bf385663bfd92c37e2..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_12:51:35_0188739a74dca8a9cf3f646a9a417af7f136f1aa/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.230144,0.00353,1130.0 diff --git a/raw_results/2023-09-07_12:51:35_0188739a74dca8a9cf3f646a9a417af7f136f1aa/pytorch_bert_inference/1/main.log b/raw_results/2023-09-07_12:51:35_0188739a74dca8a9cf3f646a9a417af7f136f1aa/pytorch_bert_inference/1/main.log deleted file mode 100644 index 
adb131072bc75d5225db0b1cef364df9dbd05820..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_12:51:35_0188739a74dca8a9cf3f646a9a417af7f136f1aa/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-11 14:50:24,229][benchmark][INFO] - Configuring inference benchmark -[2023-09-11 14:50:24,229][benchmark][INFO] - + Setting seed(42) -[2023-09-11 14:50:24,688][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-11 14:50:24,688][backend][INFO] - Configuring pytorch backend -[2023-09-11 14:50:24,689][backend][INFO] - + Checking initial device isolation -[2023-09-11 14:50:24,689][backend][INFO] - + Checking contineous device isolation -[2023-09-11 14:50:24,689][pytorch][INFO] - + Disabling gradients -[2023-09-11 14:50:24,689][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-11 14:50:24,800][pytorch][INFO] - + Turning on eval mode -[2023-09-11 14:50:24,801][inference][INFO] - Running inference benchmark -[2023-09-11 14:50:24,917][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-11 14:50:24,919][inference][INFO] - + Tracking forward pass peak memory -[2023-09-11 14:50:24,958][inference][INFO] - + Forward pass peak memory: 468.230144 (MB) -[2023-09-11 14:50:24,959][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-11 14:50:24,961][inference][INFO] - + Warming up the forward pass -[2023-09-11 14:50:24,996][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-11 14:50:30,040][inference][INFO] - + Forward pass latency: 3.53e-03 (s) -[2023-09-11 14:50:30,041][inference][INFO] - + Forward pass throughput: 1130.00 (samples/s) -[2023-09-11 14:50:30,041][inference][INFO] - Saving inference results -[2023-09-11 14:50:30,049][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-07_12:51:35_0188739a74dca8a9cf3f646a9a417af7f136f1aa/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-07_12:51:35_0188739a74dca8a9cf3f646a9a417af7f136f1aa/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 7e32a35f367f532a16d65c94e459c4f30c9188c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_12:51:35_0188739a74dca8a9cf3f646a9a417af7f136f1aa/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
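The reported throughput figures follow directly from the measured mean latency and the configured input shapes: forward throughput is batch_size / latency, and generation throughput is new_tokens / latency. A quick sanity check in Python against the values logged above; the small discrepancies come from the latency being printed with only three significant digits:

    # bert, batch_size=1 (job 0): logged latency 3.16e-03 s, logged throughput 316 samples/s
    print(round(1 / 3.16e-3))   # 316
    # bert, batch_size=4 (job 1): logged latency 3.53e-03 s, logged throughput 1130 samples/s
    print(round(4 / 3.53e-3))   # 1133, i.e. 1130 up to rounding of the printed latency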
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-07_12:51:35_0188739a74dca8a9cf3f646a9a417af7f136f1aa/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-07_12:51:35_0188739a74dca8a9cf3f646a9a417af7f136f1aa/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 974dbfd43a60001b9f0cc337594fcec87baaa8d5..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_12:51:35_0188739a74dca8a9cf3f646a9a417af7f136f1aa/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-07_12:51:35_0188739a74dca8a9cf3f646a9a417af7f136f1aa/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-07_12:51:35_0188739a74dca8a9cf3f646a9a417af7f136f1aa/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-09-07_12:51:35_0188739a74dca8a9cf3f646a9a417af7f136f1aa/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_12:51:35_0188739a74dca8a9cf3f646a9a417af7f136f1aa/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-07_12:51:35_0188739a74dca8a9cf3f646a9a417af7f136f1aa/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-07_12:51:35_0188739a74dca8a9cf3f646a9a417af7f136f1aa/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 5cf5f1ff54ffd6523449b409066f7697ce8772db..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_12:51:35_0188739a74dca8a9cf3f646a9a417af7f136f1aa/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
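These runs were launched through Hydra in multirun mode (mode: MULTIRUN above): for the bert config the basic sweeper expands benchmark.input_shapes.batch_size: 1,4 into job subdirectories 0 and 1, while the gpt2 config sweeps nothing (params: null, task: []) and produces a single job 0. The entry script itself is not recorded here, only the job name main; assuming a main.py entry point, launches equivalent to the recorded overrides would be "python main.py --config-name bert_cpu_inference hydra.mode=MULTIRUN benchmark.input_shapes.batch_size=1,4" and "python main.py --config-name gpt2_cpu_inference hydra.mode=MULTIRUN" (the -m flag is the shorthand for the same mode).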
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-07_12:51:35_0188739a74dca8a9cf3f646a9a417af7f136f1aa/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-07_12:51:35_0188739a74dca8a9cf3f646a9a417af7f136f1aa/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 735cc576f21ff23df0352e71dcaf1e6014c871c3..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_12:51:35_0188739a74dca8a9cf3f646a9a417af7f136f1aa/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.07392,0.00432,231.0,0.507,197.0 diff --git a/raw_results/2023-09-07_12:51:35_0188739a74dca8a9cf3f646a9a417af7f136f1aa/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-07_12:51:35_0188739a74dca8a9cf3f646a9a417af7f136f1aa/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 7e5af00e4ad1f86f13b08294a84645b6371f219c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_12:51:35_0188739a74dca8a9cf3f646a9a417af7f136f1aa/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-11 14:50:35,074][benchmark][INFO] - Configuring inference benchmark -[2023-09-11 14:50:35,074][benchmark][INFO] - + Setting seed(42) -[2023-09-11 14:50:36,520][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-11 14:50:36,520][backend][INFO] - Configuring pytorch backend -[2023-09-11 14:50:36,520][backend][INFO] - + Checking initial device isolation -[2023-09-11 14:50:36,521][backend][INFO] - + Checking contineous device isolation -[2023-09-11 14:50:36,521][pytorch][INFO] - + Disabling gradients -[2023-09-11 14:50:36,521][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-11 14:50:37,326][pytorch][INFO] - + Turning on eval mode -[2023-09-11 14:50:37,327][inference][INFO] - Running inference benchmark -[2023-09-11 14:50:37,530][inference][INFO] - + Tracking forward pass peak memory -[2023-09-11 14:50:37,577][inference][INFO] - + Forward pass peak memory: 469.07392 (MB) -[2023-09-11 14:50:37,578][inference][INFO] - + Warming up the forward pass 
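Each job writes a single-row inference_results.csv whose columns mirror the tracked metrics: forward.peak_memory(MB), forward.latency(s) and forward.throughput(samples/s) for every task, plus generate.latency(s) and generate.throughput(tokens/s) for text-generation (e.g. the gpt2 row above, where 197.0 tokens/s is about 100 new_tokens / 0.507 s). A minimal aggregation sketch over the raw_results layout shown in these paths (the helper is illustrative, not part of optimum-benchmark):

    import glob
    import pandas as pd

    frames = []
    # raw_results/<commit>/<experiment>/<job>/inference_results.csv
    for path in glob.glob("raw_results/*/*/*/inference_results.csv"):
        df = pd.read_csv(path, index_col=0)
        _, commit, experiment, job, _ = path.split("/")
        # keep the provenance that is otherwise only encoded in the path
        df["commit"], df["experiment"], df["job"] = commit, experiment, job
        frames.append(df)

    results = pd.concat(frames, ignore_index=True)
    print(results[["experiment", "job", "forward.latency(s)",
                   "forward.throughput(samples/s)"]])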
-[2023-09-11 14:50:37,614][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-11 14:50:42,658][inference][INFO] - + Forward pass latency: 4.32e-03 (s) -[2023-09-11 14:50:42,659][inference][INFO] - + Forward pass throughput: 231.00 (samples/s) -[2023-09-11 14:50:42,660][inference][INFO] - + Warming up the generation pass -[2023-09-11 14:50:43,232][inference][INFO] - + Tracking generation latency and throughput -[2023-09-11 14:50:48,308][inference][INFO] - + Generation pass latency: 5.07e-01 (s) -[2023-09-11 14:50:48,309][inference][INFO] - + Generation pass throughput: 197.00 (tokens/s) -[2023-09-11 14:50:48,309][inference][INFO] - Saving inference results -[2023-09-11 14:50:48,321][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-07_16:17:30_3744126c87ad429ba60efc690d8ceb4630dff523/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-07_16:17:30_3744126c87ad429ba60efc690d8ceb4630dff523/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index d75847bed7772db4647c7b58e851f1868b48e619..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_16:17:30_3744126c87ad429ba60efc690d8ceb4630dff523/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-07_16:17:30_3744126c87ad429ba60efc690d8ceb4630dff523/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-07_16:17:30_3744126c87ad429ba60efc690d8ceb4630dff523/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 8ab038ef4fe5a61dda40defb714297e7a705532c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_16:17:30_3744126c87ad429ba60efc690d8ceb4630dff523/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-07_16:17:30_3744126c87ad429ba60efc690d8ceb4630dff523/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-09-07_16:17:30_3744126c87ad429ba60efc690d8ceb4630dff523/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-07_16:17:30_3744126c87ad429ba60efc690d8ceb4630dff523/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_16:17:30_3744126c87ad429ba60efc690d8ceb4630dff523/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-07_16:17:30_3744126c87ad429ba60efc690d8ceb4630dff523/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-07_16:17:30_3744126c87ad429ba60efc690d8ceb4630dff523/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 36a53864c4e26f9e38905bad2498ae02dc1b767f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_16:17:30_3744126c87ad429ba60efc690d8ceb4630dff523/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-07_16:17:30_3744126c87ad429ba60efc690d8ceb4630dff523/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-07_16:17:30_3744126c87ad429ba60efc690d8ceb4630dff523/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 778759c18bb356b2747d9d2ec1b7d94f65c63060..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_16:17:30_3744126c87ad429ba60efc690d8ceb4630dff523/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.72281599999997,0.00321,312.0 diff --git a/raw_results/2023-09-07_16:17:30_3744126c87ad429ba60efc690d8ceb4630dff523/pytorch_bert_inference/0/main.log b/raw_results/2023-09-07_16:17:30_3744126c87ad429ba60efc690d8ceb4630dff523/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
86c334dd096fe7fc23dfcf06c31e27584d3eb660..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_16:17:30_3744126c87ad429ba60efc690d8ceb4630dff523/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-11 20:50:12,551][benchmark][INFO] - Configuring inference benchmark -[2023-09-11 20:50:12,552][benchmark][INFO] - + Setting seed(42) -[2023-09-11 20:50:14,085][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-11 20:50:14,085][backend][INFO] - Configuring pytorch backend -[2023-09-11 20:50:14,086][backend][INFO] - + Checking initial device isolation -[2023-09-11 20:50:14,086][backend][INFO] - + Checking contineous device isolation -[2023-09-11 20:50:14,086][pytorch][INFO] - + Disabling gradients -[2023-09-11 20:50:14,086][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-11 20:50:14,688][pytorch][INFO] - + Turning on eval mode -[2023-09-11 20:50:14,689][inference][INFO] - Running inference benchmark -[2023-09-11 20:50:14,803][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-11 20:50:14,804][inference][INFO] - + Tracking forward pass peak memory -[2023-09-11 20:50:14,866][inference][INFO] - + Forward pass peak memory: 466.72281599999997 (MB) -[2023-09-11 20:50:14,867][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-11 20:50:14,869][inference][INFO] - + Warming up the forward pass -[2023-09-11 20:50:14,906][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-11 20:50:19,960][inference][INFO] - + Forward pass latency: 3.21e-03 (s) -[2023-09-11 20:50:19,962][inference][INFO] - + Forward pass throughput: 312.00 (samples/s) -[2023-09-11 20:50:19,962][inference][INFO] - Saving inference results -[2023-09-11 20:50:19,975][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-07_16:17:30_3744126c87ad429ba60efc690d8ceb4630dff523/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-07_16:17:30_3744126c87ad429ba60efc690d8ceb4630dff523/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 344ec2f935a31a19a62e706f12f4bb8e09c4c386..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_16:17:30_3744126c87ad429ba60efc690d8ceb4630dff523/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-07_16:17:30_3744126c87ad429ba60efc690d8ceb4630dff523/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-07_16:17:30_3744126c87ad429ba60efc690d8ceb4630dff523/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index abc38fd0bf4527f5656b511f3c80d5eea914d79b..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_16:17:30_3744126c87ad429ba60efc690d8ceb4630dff523/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
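Note the two flavours of configuration captured per job: .config/config.yaml keeps the unresolved interpolation disable_grad: ${is_inference:${benchmark.name}}, while hydra_config.yaml (below) stores the resolved value disable_grad: true. is_inference is a custom OmegaConf resolver registered by the benchmark harness; a minimal re-implementation consistent with the resolved output (the resolver body here is inferred, not taken from the library) would be:

    from omegaconf import OmegaConf

    # hypothetical resolver: inference benchmarks disable gradients / force eval mode
    OmegaConf.register_new_resolver("is_inference", lambda name: name == "inference")

    cfg = OmegaConf.create({
        "benchmark": {"name": "inference"},
        "backend": {"disable_grad": "${is_inference:${benchmark.name}}"},
    })
    print(cfg.backend.disable_grad)  # True, as serialized in hydra_config.yaml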
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-07_16:17:30_3744126c87ad429ba60efc690d8ceb4630dff523/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-07_16:17:30_3744126c87ad429ba60efc690d8ceb4630dff523/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-09-07_16:17:30_3744126c87ad429ba60efc690d8ceb4630dff523/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_16:17:30_3744126c87ad429ba60efc690d8ceb4630dff523/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-07_16:17:30_3744126c87ad429ba60efc690d8ceb4630dff523/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-07_16:17:30_3744126c87ad429ba60efc690d8ceb4630dff523/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 9af0e66383a2862a3d036ea4917e3bacabb881ae..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_16:17:30_3744126c87ad429ba60efc690d8ceb4630dff523/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
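The job_logging section above is what produces the main.log files quoted throughout: the colorlog formatter feeds the console handler, while the simple formatter, '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s', feeds the file handler writing ${hydra.job.name}.log, i.e. main.log. The same line format can be reproduced with the standard library alone:

    import logging

    logging.basicConfig(
        filename="main.log",
        level=logging.INFO,
        format="[%(asctime)s][%(name)s][%(levelname)s] - %(message)s",
    )
    logging.getLogger("inference").info("+ Forward pass latency: %.2e (s)", 3.21e-3)
    # [2023-09-11 20:50:19,960][inference][INFO] - + Forward pass latency: 3.21e-03 (s)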
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-07_16:17:30_3744126c87ad429ba60efc690d8ceb4630dff523/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-07_16:17:30_3744126c87ad429ba60efc690d8ceb4630dff523/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 4320dc8cf6fd4adfe68343162897deb98d17aafe..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_16:17:30_3744126c87ad429ba60efc690d8ceb4630dff523/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.779584,0.00432,926.0 diff --git a/raw_results/2023-09-07_16:17:30_3744126c87ad429ba60efc690d8ceb4630dff523/pytorch_bert_inference/1/main.log b/raw_results/2023-09-07_16:17:30_3744126c87ad429ba60efc690d8ceb4630dff523/pytorch_bert_inference/1/main.log deleted file mode 100644 index af6ab263cade32071db366855b994c6a0d6d9e71..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_16:17:30_3744126c87ad429ba60efc690d8ceb4630dff523/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-11 20:50:20,382][benchmark][INFO] - Configuring inference benchmark -[2023-09-11 20:50:20,383][benchmark][INFO] - + Setting seed(42) -[2023-09-11 20:50:21,041][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-11 20:50:21,041][backend][INFO] - Configuring pytorch backend -[2023-09-11 20:50:21,042][backend][INFO] - + Checking initial device isolation -[2023-09-11 20:50:21,042][backend][INFO] - + Checking contineous device isolation -[2023-09-11 20:50:21,042][pytorch][INFO] - + Disabling gradients -[2023-09-11 20:50:21,042][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-11 20:50:21,156][pytorch][INFO] - + Turning on eval mode -[2023-09-11 20:50:21,156][inference][INFO] - Running inference benchmark -[2023-09-11 20:50:21,279][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-11 20:50:21,280][inference][INFO] - + Tracking forward pass peak 
memory -[2023-09-11 20:50:21,328][inference][INFO] - + Forward pass peak memory: 467.779584 (MB) -[2023-09-11 20:50:21,329][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-11 20:50:21,331][inference][INFO] - + Warming up the forward pass -[2023-09-11 20:50:21,382][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-11 20:50:26,424][inference][INFO] - + Forward pass latency: 4.32e-03 (s) -[2023-09-11 20:50:26,425][inference][INFO] - + Forward pass throughput: 926.00 (samples/s) -[2023-09-11 20:50:26,425][inference][INFO] - Saving inference results -[2023-09-11 20:50:26,433][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-07_16:17:30_3744126c87ad429ba60efc690d8ceb4630dff523/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-07_16:17:30_3744126c87ad429ba60efc690d8ceb4630dff523/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 7e32a35f367f532a16d65c94e459c4f30c9188c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_16:17:30_3744126c87ad429ba60efc690d8ceb4630dff523/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-07_16:17:30_3744126c87ad429ba60efc690d8ceb4630dff523/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-07_16:17:30_3744126c87ad429ba60efc690d8ceb4630dff523/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 87c715c1949fcf3f7eb1d7e3a62603fcf8ea3708..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_16:17:30_3744126c87ad429ba60efc690d8ceb4630dff523/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
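The "forward pass peak memory" figures are process-level peaks, which is why they sit around 467-470 MB even for these tiny test models: the baseline is dominated by the Python and PyTorch runtime itself. One way to obtain such a number on CPU is to sample the process RSS from a background thread while the forward pass runs; the sketch below uses psutil and is only an approximation of whatever optimum-benchmark's memory tracker actually does:

    import threading
    import psutil

    def track_peak_memory_mb(fn, interval_s=1e-3):
        """Run fn() while sampling this process's RSS; return (result, peak_mb)."""
        proc = psutil.Process()
        peak = proc.memory_info().rss
        done = threading.Event()

        def sampler():
            nonlocal peak
            while not done.is_set():
                peak = max(peak, proc.memory_info().rss)
                done.wait(interval_s)

        thread = threading.Thread(target=sampler)
        thread.start()
        try:
            result = fn()
        finally:
            done.set()
            thread.join()
        # the logs report decimal megabytes, e.g. 468230144 bytes -> 468.230144 MB
        return result, peak / 1e6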
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-07_16:17:30_3744126c87ad429ba60efc690d8ceb4630dff523/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-07_16:17:30_3744126c87ad429ba60efc690d8ceb4630dff523/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-09-07_16:17:30_3744126c87ad429ba60efc690d8ceb4630dff523/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_16:17:30_3744126c87ad429ba60efc690d8ceb4630dff523/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-07_16:17:30_3744126c87ad429ba60efc690d8ceb4630dff523/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-07_16:17:30_3744126c87ad429ba60efc690d8ceb4630dff523/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 5cf5f1ff54ffd6523449b409066f7697ce8772db..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_16:17:30_3744126c87ad429ba60efc690d8ceb4630dff523/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-07_16:17:30_3744126c87ad429ba60efc690d8ceb4630dff523/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-07_16:17:30_3744126c87ad429ba60efc690d8ceb4630dff523/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 0b50c65ee4669922e10158cfb936e2574c7b894c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_16:17:30_3744126c87ad429ba60efc690d8ceb4630dff523/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,470.441984,0.00322,311.0,0.495,202.0 diff --git a/raw_results/2023-09-07_16:17:30_3744126c87ad429ba60efc690d8ceb4630dff523/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-07_16:17:30_3744126c87ad429ba60efc690d8ceb4630dff523/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 58748472767b1e4a986c1e7cf87c8a03666ea493..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-09-07_16:17:30_3744126c87ad429ba60efc690d8ceb4630dff523/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-11 20:50:31,313][benchmark][INFO] - Configuring inference benchmark -[2023-09-11 20:50:31,314][benchmark][INFO] - + Setting seed(42) -[2023-09-11 20:50:32,814][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-11 20:50:32,814][backend][INFO] - Configuring pytorch backend -[2023-09-11 20:50:32,815][backend][INFO] - + Checking initial device isolation -[2023-09-11 20:50:32,815][backend][INFO] - + Checking contineous device isolation -[2023-09-11 20:50:32,815][pytorch][INFO] - + Disabling gradients -[2023-09-11 20:50:32,815][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-11 20:50:33,452][pytorch][INFO] - + Turning on eval mode -[2023-09-11 20:50:33,452][inference][INFO] - Running inference benchmark -[2023-09-11 20:50:33,810][inference][INFO] - + Tracking forward pass peak memory -[2023-09-11 20:50:33,943][inference][INFO] - + Forward pass peak memory: 470.441984 (MB) -[2023-09-11 20:50:33,945][inference][INFO] - + Warming up the forward pass -[2023-09-11 20:50:33,978][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-11 20:50:39,028][inference][INFO] - + Forward pass latency: 3.22e-03 (s) -[2023-09-11 20:50:39,029][inference][INFO] - + Forward pass throughput: 311.00 (samples/s) -[2023-09-11 20:50:39,029][inference][INFO] - + Warming up the generation pass -[2023-09-11 20:50:39,527][inference][INFO] - + Tracking generation latency and throughput -[2023-09-11 20:50:44,973][inference][INFO] - + Generation pass latency: 4.95e-01 (s) -[2023-09-11 20:50:44,974][inference][INFO] - + Generation pass throughput: 202.00 (tokens/s) -[2023-09-11 20:50:44,974][inference][INFO] - Saving inference results -[2023-09-11 20:50:44,988][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-07_16:49:14_2af87d018eff577c421b02e00bf34ab6aeeb8c42/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-07_16:49:14_2af87d018eff577c421b02e00bf34ab6aeeb8c42/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index d75847bed7772db4647c7b58e851f1868b48e619..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_16:49:14_2af87d018eff577c421b02e00bf34ab6aeeb8c42/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
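The tracking windows in these logs line up with benchmark_duration: 5 and warmup_runs: 10 from the configs: after the warm-up calls, the forward (or generation) pass is invoked in a loop until roughly five seconds have elapsed, and the mean latency over that window is reported; for example, 20:50:33,978 to 20:50:39,028 above spans about 5.05 s. A simplified sketch of that measurement scheme, not the library's exact loop:

    import time

    def mean_latency_s(fn, warmup_runs=10, benchmark_duration=5.0):
        """Mean latency of fn() over a fixed-duration measurement window."""
        for _ in range(warmup_runs):  # warm up allocator and caches first
            fn()
        latencies = []
        start = time.perf_counter()
        while time.perf_counter() - start < benchmark_duration:
            t0 = time.perf_counter()
            fn()
            latencies.append(time.perf_counter() - t0)
        return sum(latencies) / len(latencies)

    # throughput then follows as batch_size / latency for the forward pass and
    # new_tokens / latency for generation, e.g. 100 / 0.495 s ~= 202 tokens/s above.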
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-07_16:49:14_2af87d018eff577c421b02e00bf34ab6aeeb8c42/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-07_16:49:14_2af87d018eff577c421b02e00bf34ab6aeeb8c42/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 5470ea537f8ca9caf117664ae3ee24bef9fd2385..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_16:49:14_2af87d018eff577c421b02e00bf34ab6aeeb8c42/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
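These .config/config.yaml snapshots are plain OmegaConf/Hydra YAML, so they can be reloaded outside the benchmark harness. A minimal sketch, assuming omegaconf is installed and the deleted file has been restored at the hypothetical relative path used here; literal keys resolve directly, while interpolations such as ${is_inference:${benchmark.name}} stay unresolved until the matching resolver is registered:

    from omegaconf import OmegaConf

    cfg = OmegaConf.load("pytorch_bert_inference/0/.config/config.yaml")
    print(cfg.model)                              # hf-internal-testing/tiny-random-bert
    print(cfg.benchmark.input_shapes.batch_size)  # 1
    # Accessing cfg.backend.disable_grad would raise here: its
    # ${is_inference:...} interpolation needs a custom resolver
    # (see the resolver sketch further below).
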
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-07_16:49:14_2af87d018eff577c421b02e00bf34ab6aeeb8c42/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-07_16:49:14_2af87d018eff577c421b02e00bf34ab6aeeb8c42/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-07_16:49:14_2af87d018eff577c421b02e00bf34ab6aeeb8c42/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_16:49:14_2af87d018eff577c421b02e00bf34ab6aeeb8c42/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-07_16:49:14_2af87d018eff577c421b02e00bf34ab6aeeb8c42/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-07_16:49:14_2af87d018eff577c421b02e00bf34ab6aeeb8c42/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 36a53864c4e26f9e38905bad2498ae02dc1b767f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_16:49:14_2af87d018eff577c421b02e00bf34ab6aeeb8c42/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
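The hydra.yaml above records a MULTIRUN job: config_name bert_cpu_inference, a sweep over benchmark.input_shapes.batch_size: 1,4, launched from /home/user/transformers-regression with the configs/ directory added to the search path by the command line. The exact entry point is not shown in these files, but assuming a main.py wrapper (hydra.job.name is recorded as main), the sweep would correspond to a Hydra invocation along these lines, using only standard Hydra flags:

    python main.py --multirun --config-dir configs --config-name bert_cpu_inference benchmark.input_shapes.batch_size=1,4
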
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-07_16:49:14_2af87d018eff577c421b02e00bf34ab6aeeb8c42/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-07_16:49:14_2af87d018eff577c421b02e00bf34ab6aeeb8c42/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 8e30e24b8a20b4ab564a2d418f51d3a88d938add..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_16:49:14_2af87d018eff577c421b02e00bf34ab6aeeb8c42/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.08383999999995,0.00408,245.0 diff --git a/raw_results/2023-09-07_16:49:14_2af87d018eff577c421b02e00bf34ab6aeeb8c42/pytorch_bert_inference/0/main.log b/raw_results/2023-09-07_16:49:14_2af87d018eff577c421b02e00bf34ab6aeeb8c42/pytorch_bert_inference/0/main.log deleted file mode 100644 index 6ab86a9c46ad6d5f88b85bab50292dda7709c8c0..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_16:49:14_2af87d018eff577c421b02e00bf34ab6aeeb8c42/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-12 08:50:18,172][benchmark][INFO] - Configuring inference benchmark -[2023-09-12 08:50:18,173][benchmark][INFO] - + Setting seed(42) -[2023-09-12 08:50:19,558][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-12 08:50:19,559][backend][INFO] - Configuring pytorch backend -[2023-09-12 08:50:19,559][backend][INFO] - + Checking initial device isolation -[2023-09-12 08:50:19,559][backend][INFO] - + Checking contineous device isolation -[2023-09-12 08:50:19,559][pytorch][INFO] - + Disabling gradients -[2023-09-12 08:50:19,560][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-12 08:50:20,280][pytorch][INFO] - + Turning on eval mode -[2023-09-12 08:50:20,280][inference][INFO] - Running inference benchmark -[2023-09-12 08:50:20,407][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-12 08:50:20,408][inference][INFO] - + Tracking forward 
pass peak memory -[2023-09-12 08:50:20,465][inference][INFO] - + Forward pass peak memory: 466.08383999999995 (MB) -[2023-09-12 08:50:20,467][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-12 08:50:20,469][inference][INFO] - + Warming up the forward pass -[2023-09-12 08:50:20,500][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-12 08:50:25,544][inference][INFO] - + Forward pass latency: 4.08e-03 (s) -[2023-09-12 08:50:25,546][inference][INFO] - + Forward pass throughput: 245.00 (samples/s) -[2023-09-12 08:50:25,546][inference][INFO] - Saving inference results -[2023-09-12 08:50:25,556][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-07_16:49:14_2af87d018eff577c421b02e00bf34ab6aeeb8c42/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-07_16:49:14_2af87d018eff577c421b02e00bf34ab6aeeb8c42/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 344ec2f935a31a19a62e706f12f4bb8e09c4c386..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_16:49:14_2af87d018eff577c421b02e00bf34ab6aeeb8c42/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-07_16:49:14_2af87d018eff577c421b02e00bf34ab6aeeb8c42/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-07_16:49:14_2af87d018eff577c421b02e00bf34ab6aeeb8c42/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index ebacd8a18111a0e32a094c1571d59b317e97c492..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_16:49:14_2af87d018eff577c421b02e00bf34ab6aeeb8c42/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} 
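The forward throughput reported in the bert batch-size-1 log above is consistent with throughput = batch_size / mean latency; that relationship is an inference from the recorded values, not a definition stated in these files. A quick stdlib check:

    # Sanity-check the batch-size-1 bert run logged above.
    batch_size = 1
    latency_s = 4.08e-03                  # "Forward pass latency" from the log
    print(round(batch_size / latency_s))  # 245, matching the logged throughput
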
- launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-07_16:49:14_2af87d018eff577c421b02e00bf34ab6aeeb8c42/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-09-07_16:49:14_2af87d018eff577c421b02e00bf34ab6aeeb8c42/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-09-07_16:49:14_2af87d018eff577c421b02e00bf34ab6aeeb8c42/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_16:49:14_2af87d018eff577c421b02e00bf34ab6aeeb8c42/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-07_16:49:14_2af87d018eff577c421b02e00bf34ab6aeeb8c42/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-07_16:49:14_2af87d018eff577c421b02e00bf34ab6aeeb8c42/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 9af0e66383a2862a3d036ea4917e3bacabb881ae..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_16:49:14_2af87d018eff577c421b02e00bf34ab6aeeb8c42/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-07_16:49:14_2af87d018eff577c421b02e00bf34ab6aeeb8c42/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-07_16:49:14_2af87d018eff577c421b02e00bf34ab6aeeb8c42/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index aa66ca1e509fa1e6a91a3ca2a32ee6bc28240e2d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_16:49:14_2af87d018eff577c421b02e00bf34ab6aeeb8c42/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.931712,0.00427,937.0 diff --git a/raw_results/2023-09-07_16:49:14_2af87d018eff577c421b02e00bf34ab6aeeb8c42/pytorch_bert_inference/1/main.log b/raw_results/2023-09-07_16:49:14_2af87d018eff577c421b02e00bf34ab6aeeb8c42/pytorch_bert_inference/1/main.log deleted file mode 100644 index 
ee3d69d3f70e52c778a35ada7c75957b60cd9607..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_16:49:14_2af87d018eff577c421b02e00bf34ab6aeeb8c42/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-12 08:50:25,925][benchmark][INFO] - Configuring inference benchmark -[2023-09-12 08:50:25,926][benchmark][INFO] - + Setting seed(42) -[2023-09-12 08:50:26,346][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-12 08:50:26,346][backend][INFO] - Configuring pytorch backend -[2023-09-12 08:50:26,346][backend][INFO] - + Checking initial device isolation -[2023-09-12 08:50:26,347][backend][INFO] - + Checking contineous device isolation -[2023-09-12 08:50:26,347][pytorch][INFO] - + Disabling gradients -[2023-09-12 08:50:26,347][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-12 08:50:26,465][pytorch][INFO] - + Turning on eval mode -[2023-09-12 08:50:26,465][inference][INFO] - Running inference benchmark -[2023-09-12 08:50:26,600][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-12 08:50:26,602][inference][INFO] - + Tracking forward pass peak memory -[2023-09-12 08:50:26,643][inference][INFO] - + Forward pass peak memory: 466.931712 (MB) -[2023-09-12 08:50:26,644][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-12 08:50:26,646][inference][INFO] - + Warming up the forward pass -[2023-09-12 08:50:26,688][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-12 08:50:31,732][inference][INFO] - + Forward pass latency: 4.27e-03 (s) -[2023-09-12 08:50:31,733][inference][INFO] - + Forward pass throughput: 937.00 (samples/s) -[2023-09-12 08:50:31,733][inference][INFO] - Saving inference results -[2023-09-12 08:50:31,741][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-07_16:49:14_2af87d018eff577c421b02e00bf34ab6aeeb8c42/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-07_16:49:14_2af87d018eff577c421b02e00bf34ab6aeeb8c42/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 7e32a35f367f532a16d65c94e459c4f30c9188c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_16:49:14_2af87d018eff577c421b02e00bf34ab6aeeb8c42/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
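Pairs of runs like these, taken at different commits, are what a regression check would compare. The sketch below is one plausible convention (absolute diff and percentage ratio against a base run), not necessarily the one used by this repository's dashboards; the paths are hypothetical and pandas is assumed:

    import pandas as pd

    # Compare a current run's single-row CSV against a base run's.
    base = pd.read_csv("base_run/inference_results.csv", index_col=0)
    curr = pd.read_csv("current_run/inference_results.csv", index_col=0)
    for col in base.columns:
        diff = curr[col].iloc[0] - base[col].iloc[0]
        ratio = 100.0 * diff / base[col].iloc[0]
        print(f"{col}: diff={diff:+.6g} ({ratio:+.2f}%)")
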
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-07_16:49:14_2af87d018eff577c421b02e00bf34ab6aeeb8c42/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-07_16:49:14_2af87d018eff577c421b02e00bf34ab6aeeb8c42/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index e01c724700d3a4508376e60d466e69e1dde16aff..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_16:49:14_2af87d018eff577c421b02e00bf34ab6aeeb8c42/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-07_16:49:14_2af87d018eff577c421b02e00bf34ab6aeeb8c42/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-07_16:49:14_2af87d018eff577c421b02e00bf34ab6aeeb8c42/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-09-07_16:49:14_2af87d018eff577c421b02e00bf34ab6aeeb8c42/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_16:49:14_2af87d018eff577c421b02e00bf34ab6aeeb8c42/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-07_16:49:14_2af87d018eff577c421b02e00bf34ab6aeeb8c42/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-07_16:49:14_2af87d018eff577c421b02e00bf34ab6aeeb8c42/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 5cf5f1ff54ffd6523449b409066f7697ce8772db..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_16:49:14_2af87d018eff577c421b02e00bf34ab6aeeb8c42/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-07_16:49:14_2af87d018eff577c421b02e00bf34ab6aeeb8c42/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-07_16:49:14_2af87d018eff577c421b02e00bf34ab6aeeb8c42/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index f624d880b3647c44cd6391dbc2191fac71aed325..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_16:49:14_2af87d018eff577c421b02e00bf34ab6aeeb8c42/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.430272,0.00308,325.0,0.487,205.0 diff --git a/raw_results/2023-09-07_16:49:14_2af87d018eff577c421b02e00bf34ab6aeeb8c42/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-07_16:49:14_2af87d018eff577c421b02e00bf34ab6aeeb8c42/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index f47b044f3f0d8cda2238fa92b7806b629976e8e6..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_16:49:14_2af87d018eff577c421b02e00bf34ab6aeeb8c42/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-12 08:50:36,583][benchmark][INFO] - Configuring inference benchmark -[2023-09-12 08:50:36,584][benchmark][INFO] - + Setting seed(42) -[2023-09-12 08:50:38,029][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-12 08:50:38,029][backend][INFO] - Configuring pytorch backend -[2023-09-12 08:50:38,030][backend][INFO] - + Checking initial device isolation -[2023-09-12 08:50:38,030][backend][INFO] - + Checking contineous device isolation -[2023-09-12 08:50:38,030][pytorch][INFO] - + Disabling gradients -[2023-09-12 08:50:38,030][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-12 08:50:38,670][pytorch][INFO] - + Turning on eval mode -[2023-09-12 08:50:38,671][inference][INFO] - Running inference benchmark -[2023-09-12 08:50:38,858][inference][INFO] - + Tracking forward pass peak memory -[2023-09-12 08:50:38,904][inference][INFO] - + Forward pass peak memory: 469.430272 (MB) -[2023-09-12 08:50:38,906][inference][INFO] - + Warming up the forward pass 
-[2023-09-12 08:50:38,942][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-12 08:50:43,992][inference][INFO] - + Forward pass latency: 3.08e-03 (s) -[2023-09-12 08:50:43,994][inference][INFO] - + Forward pass throughput: 325.00 (samples/s) -[2023-09-12 08:50:43,994][inference][INFO] - + Warming up the generation pass -[2023-09-12 08:50:44,488][inference][INFO] - + Tracking generation latency and throughput -[2023-09-12 08:50:49,846][inference][INFO] - + Generation pass latency: 4.87e-01 (s) -[2023-09-12 08:50:49,847][inference][INFO] - + Generation pass throughput: 205.00 (tokens/s) -[2023-09-12 08:50:49,847][inference][INFO] - Saving inference results -[2023-09-12 08:50:49,859][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-07_18:51:45_02c4a77f572199a926b52fd8559dca87de6ed4bb/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-07_18:51:45_02c4a77f572199a926b52fd8559dca87de6ed4bb/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index d75847bed7772db4647c7b58e851f1868b48e619..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_18:51:45_02c4a77f572199a926b52fd8559dca87de6ed4bb/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-07_18:51:45_02c4a77f572199a926b52fd8559dca87de6ed4bb/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-07_18:51:45_02c4a77f572199a926b52fd8559dca87de6ed4bb/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 9ecccbee940add9896168ead1739bdb6b8c72bad..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_18:51:45_02c4a77f572199a926b52fd8559dca87de6ed4bb/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
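The gpt2 generation numbers above line up with tokens/s ≈ new_tokens × batch_size / generation latency, taking new_tokens: 100 from the matching hydra_config.yaml; again this is an inference from the recorded values rather than a definition stated in these files:

    # Sanity-check the gpt2 generation pass logged above.
    new_tokens, batch_size = 100, 1
    generate_latency_s = 4.87e-01
    print(round(new_tokens * batch_size / generate_latency_s))  # 205, as logged
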
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-07_18:51:45_02c4a77f572199a926b52fd8559dca87de6ed4bb/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-09-07_18:51:45_02c4a77f572199a926b52fd8559dca87de6ed4bb/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-07_18:51:45_02c4a77f572199a926b52fd8559dca87de6ed4bb/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_18:51:45_02c4a77f572199a926b52fd8559dca87de6ed4bb/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-07_18:51:45_02c4a77f572199a926b52fd8559dca87de6ed4bb/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-07_18:51:45_02c4a77f572199a926b52fd8559dca87de6ed4bb/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 36a53864c4e26f9e38905bad2498ae02dc1b767f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_18:51:45_02c4a77f572199a926b52fd8559dca87de6ed4bb/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-07_18:51:45_02c4a77f572199a926b52fd8559dca87de6ed4bb/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-07_18:51:45_02c4a77f572199a926b52fd8559dca87de6ed4bb/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 8489c021d45b8607d6e30f7f1eac04d68d2244e3..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_18:51:45_02c4a77f572199a926b52fd8559dca87de6ed4bb/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.091456,0.00326,307.0 diff --git a/raw_results/2023-09-07_18:51:45_02c4a77f572199a926b52fd8559dca87de6ed4bb/pytorch_bert_inference/0/main.log b/raw_results/2023-09-07_18:51:45_02c4a77f572199a926b52fd8559dca87de6ed4bb/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
8976c0ee81c5cbd3675302d20e883f9eb35b8466..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_18:51:45_02c4a77f572199a926b52fd8559dca87de6ed4bb/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-12 08:52:12,737][benchmark][INFO] - Configuring inference benchmark -[2023-09-12 08:52:12,738][benchmark][INFO] - + Setting seed(42) -[2023-09-12 08:52:13,926][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-12 08:52:13,926][backend][INFO] - Configuring pytorch backend -[2023-09-12 08:52:13,926][backend][INFO] - + Checking initial device isolation -[2023-09-12 08:52:13,927][backend][INFO] - + Checking contineous device isolation -[2023-09-12 08:52:13,927][pytorch][INFO] - + Disabling gradients -[2023-09-12 08:52:13,927][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-12 08:52:14,524][pytorch][INFO] - + Turning on eval mode -[2023-09-12 08:52:14,525][inference][INFO] - Running inference benchmark -[2023-09-12 08:52:14,643][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-12 08:52:14,644][inference][INFO] - + Tracking forward pass peak memory -[2023-09-12 08:52:14,705][inference][INFO] - + Forward pass peak memory: 467.091456 (MB) -[2023-09-12 08:52:14,706][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-12 08:52:14,708][inference][INFO] - + Warming up the forward pass -[2023-09-12 08:52:14,747][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-12 08:52:19,798][inference][INFO] - + Forward pass latency: 3.26e-03 (s) -[2023-09-12 08:52:19,799][inference][INFO] - + Forward pass throughput: 307.00 (samples/s) -[2023-09-12 08:52:19,799][inference][INFO] - Saving inference results -[2023-09-12 08:52:19,809][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-07_18:51:45_02c4a77f572199a926b52fd8559dca87de6ed4bb/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-07_18:51:45_02c4a77f572199a926b52fd8559dca87de6ed4bb/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 344ec2f935a31a19a62e706f12f4bb8e09c4c386..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_18:51:45_02c4a77f572199a926b52fd8559dca87de6ed4bb/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-07_18:51:45_02c4a77f572199a926b52fd8559dca87de6ed4bb/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-07_18:51:45_02c4a77f572199a926b52fd8559dca87de6ed4bb/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 70610c4810c145c0e23fe068effb22d08af5d4ea..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_18:51:45_02c4a77f572199a926b52fd8559dca87de6ed4bb/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-07_18:51:45_02c4a77f572199a926b52fd8559dca87de6ed4bb/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-07_18:51:45_02c4a77f572199a926b52fd8559dca87de6ed4bb/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-09-07_18:51:45_02c4a77f572199a926b52fd8559dca87de6ed4bb/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_18:51:45_02c4a77f572199a926b52fd8559dca87de6ed4bb/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-07_18:51:45_02c4a77f572199a926b52fd8559dca87de6ed4bb/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-07_18:51:45_02c4a77f572199a926b52fd8559dca87de6ed4bb/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 9af0e66383a2862a3d036ea4917e3bacabb881ae..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_18:51:45_02c4a77f572199a926b52fd8559dca87de6ed4bb/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
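The job_logging section repeated in these hydra.yaml files is a standard logging dictConfig with a colorlog formatter, which is why every main.log line has the [timestamp][name][LEVEL] - message shape. A stand-alone re-creation of just the console handler, assuming the colorlog package is installed:

    import logging.config

    logging.config.dictConfig({
        "version": 1,
        "formatters": {
            "colorlog": {
                "()": "colorlog.ColoredFormatter",
                "format": "[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s]"
                          "[%(log_color)s%(levelname)s%(reset)s] - %(message)s",
                "log_colors": {"DEBUG": "purple", "INFO": "green",
                               "WARNING": "yellow", "ERROR": "red",
                               "CRITICAL": "red"},
            },
        },
        "handlers": {
            "console": {
                "class": "logging.StreamHandler",
                "formatter": "colorlog",
                "stream": "ext://sys.stdout",
            },
        },
        "root": {"level": "INFO", "handlers": ["console"]},
    })
    logging.getLogger("inference").info("Running inference benchmark")
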
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-07_18:51:45_02c4a77f572199a926b52fd8559dca87de6ed4bb/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-07_18:51:45_02c4a77f572199a926b52fd8559dca87de6ed4bb/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 33f0e984cc3ee3623b436ddc2e4a324f3dbb7305..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_18:51:45_02c4a77f572199a926b52fd8559dca87de6ed4bb/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.123648,0.00367,1090.0 diff --git a/raw_results/2023-09-07_18:51:45_02c4a77f572199a926b52fd8559dca87de6ed4bb/pytorch_bert_inference/1/main.log b/raw_results/2023-09-07_18:51:45_02c4a77f572199a926b52fd8559dca87de6ed4bb/pytorch_bert_inference/1/main.log deleted file mode 100644 index 4cc692dc3b08f074f7e888962be003fa64c90f2a..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_18:51:45_02c4a77f572199a926b52fd8559dca87de6ed4bb/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-12 08:52:20,170][benchmark][INFO] - Configuring inference benchmark -[2023-09-12 08:52:20,172][benchmark][INFO] - + Setting seed(42) -[2023-09-12 08:52:20,670][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-12 08:52:20,670][backend][INFO] - Configuring pytorch backend -[2023-09-12 08:52:20,671][backend][INFO] - + Checking initial device isolation -[2023-09-12 08:52:20,671][backend][INFO] - + Checking contineous device isolation -[2023-09-12 08:52:20,671][pytorch][INFO] - + Disabling gradients -[2023-09-12 08:52:20,671][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-12 08:52:20,789][pytorch][INFO] - + Turning on eval mode -[2023-09-12 08:52:20,790][inference][INFO] - Running inference benchmark -[2023-09-12 08:52:20,911][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-12 08:52:20,912][inference][INFO] - + Tracking forward pass 
peak memory -[2023-09-12 08:52:20,954][inference][INFO] - + Forward pass peak memory: 468.123648 (MB) -[2023-09-12 08:52:20,955][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-12 08:52:20,957][inference][INFO] - + Warming up the forward pass -[2023-09-12 08:52:21,009][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-12 08:52:26,052][inference][INFO] - + Forward pass latency: 3.67e-03 (s) -[2023-09-12 08:52:26,053][inference][INFO] - + Forward pass throughput: 1090.00 (samples/s) -[2023-09-12 08:52:26,053][inference][INFO] - Saving inference results -[2023-09-12 08:52:26,061][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-07_18:51:45_02c4a77f572199a926b52fd8559dca87de6ed4bb/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-07_18:51:45_02c4a77f572199a926b52fd8559dca87de6ed4bb/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 7e32a35f367f532a16d65c94e459c4f30c9188c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_18:51:45_02c4a77f572199a926b52fd8559dca87de6ed4bb/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-07_18:51:45_02c4a77f572199a926b52fd8559dca87de6ed4bb/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-07_18:51:45_02c4a77f572199a926b52fd8559dca87de6ed4bb/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 9157706ea2b981daba33fd3682b4305337b1783c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_18:51:45_02c4a77f572199a926b52fd8559dca87de6ed4bb/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
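The ${is_inference:${benchmark.name}} interpolations in these config.yaml files, which appear already resolved to true in the corresponding hydra_config.yaml snapshots, rely on a custom OmegaConf resolver, presumably registered by optimum-benchmark itself. A minimal re-creation of the mechanism, under that assumption:

    from omegaconf import OmegaConf

    # Hypothetical re-creation; the real resolver lives in optimum-benchmark.
    OmegaConf.register_new_resolver("is_inference", lambda name: name == "inference")

    cfg = OmegaConf.create({
        "benchmark": {"name": "inference"},
        "backend": {"disable_grad": "${is_inference:${benchmark.name}}"},
    })
    print(cfg.backend.disable_grad)  # True
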
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-07_18:51:45_02c4a77f572199a926b52fd8559dca87de6ed4bb/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-07_18:51:45_02c4a77f572199a926b52fd8559dca87de6ed4bb/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-09-07_18:51:45_02c4a77f572199a926b52fd8559dca87de6ed4bb/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_18:51:45_02c4a77f572199a926b52fd8559dca87de6ed4bb/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-07_18:51:45_02c4a77f572199a926b52fd8559dca87de6ed4bb/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-07_18:51:45_02c4a77f572199a926b52fd8559dca87de6ed4bb/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 5cf5f1ff54ffd6523449b409066f7697ce8772db..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_18:51:45_02c4a77f572199a926b52fd8559dca87de6ed4bb/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-07_18:51:45_02c4a77f572199a926b52fd8559dca87de6ed4bb/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-07_18:51:45_02c4a77f572199a926b52fd8559dca87de6ed4bb/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 2b523e5b9d42c587279b12edf8d085962be4d68d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_18:51:45_02c4a77f572199a926b52fd8559dca87de6ed4bb/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.184512,0.00384,260.0,0.507,197.0 diff --git a/raw_results/2023-09-07_18:51:45_02c4a77f572199a926b52fd8559dca87de6ed4bb/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-07_18:51:45_02c4a77f572199a926b52fd8559dca87de6ed4bb/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 42f8daa8a170b1addf0b97d3e5a878308e998234..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-09-07_18:51:45_02c4a77f572199a926b52fd8559dca87de6ed4bb/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-12 08:52:30,814][benchmark][INFO] - Configuring inference benchmark -[2023-09-12 08:52:30,814][benchmark][INFO] - + Setting seed(42) -[2023-09-12 08:52:32,391][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-12 08:52:32,392][backend][INFO] - Configuring pytorch backend -[2023-09-12 08:52:32,392][backend][INFO] - + Checking initial device isolation -[2023-09-12 08:52:32,392][backend][INFO] - + Checking contineous device isolation -[2023-09-12 08:52:32,392][pytorch][INFO] - + Disabling gradients -[2023-09-12 08:52:32,392][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-12 08:52:33,009][pytorch][INFO] - + Turning on eval mode -[2023-09-12 08:52:33,010][inference][INFO] - Running inference benchmark -[2023-09-12 08:52:33,199][inference][INFO] - + Tracking forward pass peak memory -[2023-09-12 08:52:33,245][inference][INFO] - + Forward pass peak memory: 469.184512 (MB) -[2023-09-12 08:52:33,247][inference][INFO] - + Warming up the forward pass -[2023-09-12 08:52:33,283][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-12 08:52:38,327][inference][INFO] - + Forward pass latency: 3.84e-03 (s) -[2023-09-12 08:52:38,328][inference][INFO] - + Forward pass throughput: 260.00 (samples/s) -[2023-09-12 08:52:38,329][inference][INFO] - + Warming up the generation pass -[2023-09-12 08:52:38,832][inference][INFO] - + Tracking generation latency and throughput -[2023-09-12 08:52:43,908][inference][INFO] - + Generation pass latency: 5.07e-01 (s) -[2023-09-12 08:52:43,909][inference][INFO] - + Generation pass throughput: 197.00 (tokens/s) -[2023-09-12 08:52:43,910][inference][INFO] - Saving inference results -[2023-09-12 08:52:43,922][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-07_18:53:43_00efd64e516cf9062ff2c0dea023ad07c993869f/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-07_18:53:43_00efd64e516cf9062ff2c0dea023ad07c993869f/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index d75847bed7772db4647c7b58e851f1868b48e619..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_18:53:43_00efd64e516cf9062ff2c0dea023ad07c993869f/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-07_18:53:43_00efd64e516cf9062ff2c0dea023ad07c993869f/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-07_18:53:43_00efd64e516cf9062ff2c0dea023ad07c993869f/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index dac458d4b96b76055cef9c8105cae13b232433f7..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_18:53:43_00efd64e516cf9062ff2c0dea023ad07c993869f/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
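The sweeper params recorded above (benchmark.input_shapes.batch_size: 1,4) are what produce the numbered job directories 0 and 1 under each bert experiment: Hydra's BasicSweeper launches one run per listed value. As a rough illustration only — assuming Hydra 1.3's compose API and that the configs directory recorded under hydra.runtime is still available locally — job 1 of this sweep could be re-composed like so:

from hydra import compose, initialize_config_dir

# Sketch: re-compose job 1 of the bert_cpu_inference sweep (batch_size=4).
# Assumes the configs dir from hydra.runtime above exists and that
# optimum_benchmark is installed so its packaged config groups resolve.
with initialize_config_dir(
    config_dir="/home/user/transformers-regression/configs",
    version_base="1.3",
):
    cfg = compose(
        config_name="bert_cpu_inference",
        overrides=["benchmark.input_shapes.batch_size=4"],
    )

print(cfg.benchmark.input_shapes.batch_size)  # 4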
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-07_18:53:43_00efd64e516cf9062ff2c0dea023ad07c993869f/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-07_18:53:43_00efd64e516cf9062ff2c0dea023ad07c993869f/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-07_18:53:43_00efd64e516cf9062ff2c0dea023ad07c993869f/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_18:53:43_00efd64e516cf9062ff2c0dea023ad07c993869f/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-07_18:53:43_00efd64e516cf9062ff2c0dea023ad07c993869f/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-07_18:53:43_00efd64e516cf9062ff2c0dea023ad07c993869f/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 36a53864c4e26f9e38905bad2498ae02dc1b767f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_18:53:43_00efd64e516cf9062ff2c0dea023ad07c993869f/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-07_18:53:43_00efd64e516cf9062ff2c0dea023ad07c993869f/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-07_18:53:43_00efd64e516cf9062ff2c0dea023ad07c993869f/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index a6e3750a5c00bc110845f3c539d21b2237b50be1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_18:53:43_00efd64e516cf9062ff2c0dea023ad07c993869f/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.12832,0.00349,287.0 diff --git a/raw_results/2023-09-07_18:53:43_00efd64e516cf9062ff2c0dea023ad07c993869f/pytorch_bert_inference/0/main.log b/raw_results/2023-09-07_18:53:43_00efd64e516cf9062ff2c0dea023ad07c993869f/pytorch_bert_inference/0/main.log deleted file mode 100644 index 5b44b7648a1781a48ad84666f86a50754632d70f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_18:53:43_00efd64e516cf9062ff2c0dea023ad07c993869f/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-12 08:54:03,664][benchmark][INFO] - Configuring inference benchmark -[2023-09-12 08:54:03,665][benchmark][INFO] - + Setting seed(42) -[2023-09-12 08:54:04,839][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-12 08:54:04,840][backend][INFO] - Configuring pytorch backend -[2023-09-12 08:54:04,840][backend][INFO] - + Checking initial device isolation -[2023-09-12 08:54:04,840][backend][INFO] - + Checking contineous device isolation -[2023-09-12 08:54:04,840][pytorch][INFO] - + Disabling gradients -[2023-09-12 08:54:04,840][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-12 08:54:05,413][pytorch][INFO] - + Turning on eval mode -[2023-09-12 08:54:05,414][inference][INFO] - Running inference benchmark -[2023-09-12 08:54:05,550][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-12 08:54:05,553][inference][INFO] - + Tracking forward pass peak 
memory -[2023-09-12 08:54:05,620][inference][INFO] - + Forward pass peak memory: 467.12832 (MB) -[2023-09-12 08:54:05,621][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-12 08:54:05,622][inference][INFO] - + Warming up the forward pass -[2023-09-12 08:54:05,657][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-12 08:54:10,707][inference][INFO] - + Forward pass latency: 3.49e-03 (s) -[2023-09-12 08:54:10,708][inference][INFO] - + Forward pass throughput: 287.00 (samples/s) -[2023-09-12 08:54:10,708][inference][INFO] - Saving inference results -[2023-09-12 08:54:10,719][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-07_18:53:43_00efd64e516cf9062ff2c0dea023ad07c993869f/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-07_18:53:43_00efd64e516cf9062ff2c0dea023ad07c993869f/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 344ec2f935a31a19a62e706f12f4bb8e09c4c386..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_18:53:43_00efd64e516cf9062ff2c0dea023ad07c993869f/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-07_18:53:43_00efd64e516cf9062ff2c0dea023ad07c993869f/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-07_18:53:43_00efd64e516cf9062ff2c0dea023ad07c993869f/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 478edf7f1cf5bbdd9290a345606c39f8bdc92808..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_18:53:43_00efd64e516cf9062ff2c0dea023ad07c993869f/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-07_18:53:43_00efd64e516cf9062ff2c0dea023ad07c993869f/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-07_18:53:43_00efd64e516cf9062ff2c0dea023ad07c993869f/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-09-07_18:53:43_00efd64e516cf9062ff2c0dea023ad07c993869f/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_18:53:43_00efd64e516cf9062ff2c0dea023ad07c993869f/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-07_18:53:43_00efd64e516cf9062ff2c0dea023ad07c993869f/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-07_18:53:43_00efd64e516cf9062ff2c0dea023ad07c993869f/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 9af0e66383a2862a3d036ea4917e3bacabb881ae..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_18:53:43_00efd64e516cf9062ff2c0dea023ad07c993869f/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-07_18:53:43_00efd64e516cf9062ff2c0dea023ad07c993869f/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-07_18:53:43_00efd64e516cf9062ff2c0dea023ad07c993869f/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 84b79e394eb204509883140837fa0ce768a14955..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_18:53:43_00efd64e516cf9062ff2c0dea023ad07c993869f/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,468.107264,0.00371,1080.0 diff --git a/raw_results/2023-09-07_18:53:43_00efd64e516cf9062ff2c0dea023ad07c993869f/pytorch_bert_inference/1/main.log b/raw_results/2023-09-07_18:53:43_00efd64e516cf9062ff2c0dea023ad07c993869f/pytorch_bert_inference/1/main.log deleted file mode 100644 index 464f69a7cf10f7c5d125b701ac038dd490d6b086..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-09-07_18:53:43_00efd64e516cf9062ff2c0dea023ad07c993869f/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-12 08:54:11,084][benchmark][INFO] - Configuring inference benchmark -[2023-09-12 08:54:11,085][benchmark][INFO] - + Setting seed(42) -[2023-09-12 08:54:11,602][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-12 08:54:11,603][backend][INFO] - Configuring pytorch backend -[2023-09-12 08:54:11,603][backend][INFO] - + Checking initial device isolation -[2023-09-12 08:54:11,603][backend][INFO] - + Checking contineous device isolation -[2023-09-12 08:54:11,603][pytorch][INFO] - + Disabling gradients -[2023-09-12 08:54:11,603][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-12 08:54:11,712][pytorch][INFO] - + Turning on eval mode -[2023-09-12 08:54:11,712][inference][INFO] - Running inference benchmark -[2023-09-12 08:54:11,829][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-12 08:54:11,830][inference][INFO] - + Tracking forward pass peak memory -[2023-09-12 08:54:11,872][inference][INFO] - + Forward pass peak memory: 468.107264 (MB) -[2023-09-12 08:54:11,873][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-12 08:54:11,875][inference][INFO] - + Warming up the forward pass -[2023-09-12 08:54:11,915][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-12 08:54:16,960][inference][INFO] - + Forward pass latency: 3.71e-03 (s) -[2023-09-12 08:54:16,961][inference][INFO] - + Forward pass throughput: 1080.00 (samples/s) -[2023-09-12 08:54:16,961][inference][INFO] - Saving inference results -[2023-09-12 08:54:16,969][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-07_18:53:43_00efd64e516cf9062ff2c0dea023ad07c993869f/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-07_18:53:43_00efd64e516cf9062ff2c0dea023ad07c993869f/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 7e32a35f367f532a16d65c94e459c4f30c9188c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_18:53:43_00efd64e516cf9062ff2c0dea023ad07c993869f/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: 
hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-07_18:53:43_00efd64e516cf9062ff2c0dea023ad07c993869f/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-07_18:53:43_00efd64e516cf9062ff2c0dea023ad07c993869f/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index e3a50d6813aa8c520b2e4ecb867f6a8951380428..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_18:53:43_00efd64e516cf9062ff2c0dea023ad07c993869f/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-07_18:53:43_00efd64e516cf9062ff2c0dea023ad07c993869f/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-07_18:53:43_00efd64e516cf9062ff2c0dea023ad07c993869f/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-09-07_18:53:43_00efd64e516cf9062ff2c0dea023ad07c993869f/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_18:53:43_00efd64e516cf9062ff2c0dea023ad07c993869f/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-07_18:53:43_00efd64e516cf9062ff2c0dea023ad07c993869f/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-07_18:53:43_00efd64e516cf9062ff2c0dea023ad07c993869f/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 5cf5f1ff54ffd6523449b409066f7697ce8772db..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_18:53:43_00efd64e516cf9062ff2c0dea023ad07c993869f/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-07_18:53:43_00efd64e516cf9062ff2c0dea023ad07c993869f/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-07_18:53:43_00efd64e516cf9062ff2c0dea023ad07c993869f/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 2f57759c9d7c3ccc8179e5dbf35bcea5314eb55a..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_18:53:43_00efd64e516cf9062ff2c0dea023ad07c993869f/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.413888,0.00455,220.0,0.568,176.0 diff --git a/raw_results/2023-09-07_18:53:43_00efd64e516cf9062ff2c0dea023ad07c993869f/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-07_18:53:43_00efd64e516cf9062ff2c0dea023ad07c993869f/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 7b3cd2b4aedad913f90b2bf29ce9fa5bc0dae751..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_18:53:43_00efd64e516cf9062ff2c0dea023ad07c993869f/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-12 08:54:21,768][benchmark][INFO] - Configuring inference benchmark -[2023-09-12 08:54:21,769][benchmark][INFO] - + Setting seed(42) -[2023-09-12 08:54:23,177][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-12 08:54:23,177][backend][INFO] - Configuring pytorch backend -[2023-09-12 08:54:23,177][backend][INFO] - + Checking initial device isolation -[2023-09-12 08:54:23,177][backend][INFO] - + Checking contineous device isolation -[2023-09-12 08:54:23,177][pytorch][INFO] - + Disabling gradients -[2023-09-12 08:54:23,179][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-12 08:54:23,803][pytorch][INFO] - + Turning on eval mode -[2023-09-12 08:54:23,803][inference][INFO] - Running inference benchmark -[2023-09-12 08:54:23,995][inference][INFO] - + Tracking forward pass peak memory -[2023-09-12 08:54:24,042][inference][INFO] - + Forward pass peak memory: 469.413888 (MB) -[2023-09-12 08:54:24,044][inference][INFO] - + Warming up the forward pass 
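The derived columns in these inference_results.csv rows appear internally consistent: forward throughput is batch_size divided by forward latency, and generation throughput is batch_size * new_tokens divided by generation latency (batch_size is 1 and new_tokens is 100 in the gpt2 configs above). A quick arithmetic check against the gpt2 row above — approximate, since the logged latencies are themselves rounded:

# Check the pytorch_gpt2_inference/0 row: 0,469.413888,0.00455,220.0,0.568,176.0
batch_size, new_tokens = 1, 100     # from the gpt2 config above
forward_latency_s = 0.00455         # forward.latency(s)
generate_latency_s = 0.568          # generate.latency(s)

forward_throughput = batch_size / forward_latency_s                  # ~219.8 samples/s
generate_throughput = batch_size * new_tokens / generate_latency_s   # ~176.1 tokens/s
print(round(forward_throughput), round(generate_throughput))         # 220 176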
-[2023-09-12 08:54:24,079][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-12 08:54:29,124][inference][INFO] - + Forward pass latency: 4.55e-03 (s) -[2023-09-12 08:54:29,125][inference][INFO] - + Forward pass throughput: 220.00 (samples/s) -[2023-09-12 08:54:29,126][inference][INFO] - + Warming up the generation pass -[2023-09-12 08:54:29,714][inference][INFO] - + Tracking generation latency and throughput -[2023-09-12 08:54:34,832][inference][INFO] - + Generation pass latency: 5.68e-01 (s) -[2023-09-12 08:54:34,833][inference][INFO] - + Generation pass throughput: 176.00 (tokens/s) -[2023-09-12 08:54:34,833][inference][INFO] - Saving inference results -[2023-09-12 08:54:34,844][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-07_18:54:52_c5e66a40a44d3170101236830737a22424846e46/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-07_18:54:52_c5e66a40a44d3170101236830737a22424846e46/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index d75847bed7772db4647c7b58e851f1868b48e619..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_18:54:52_c5e66a40a44d3170101236830737a22424846e46/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-07_18:54:52_c5e66a40a44d3170101236830737a22424846e46/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-07_18:54:52_c5e66a40a44d3170101236830737a22424846e46/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 2a104965118cc94578c08ccce7ed767b901781e8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_18:54:52_c5e66a40a44d3170101236830737a22424846e46/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-07_18:54:52_c5e66a40a44d3170101236830737a22424846e46/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-09-07_18:54:52_c5e66a40a44d3170101236830737a22424846e46/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-07_18:54:52_c5e66a40a44d3170101236830737a22424846e46/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_18:54:52_c5e66a40a44d3170101236830737a22424846e46/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-07_18:54:52_c5e66a40a44d3170101236830737a22424846e46/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-07_18:54:52_c5e66a40a44d3170101236830737a22424846e46/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 36a53864c4e26f9e38905bad2498ae02dc1b767f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_18:54:52_c5e66a40a44d3170101236830737a22424846e46/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-07_18:54:52_c5e66a40a44d3170101236830737a22424846e46/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-07_18:54:52_c5e66a40a44d3170101236830737a22424846e46/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 51f729931d2371a5b5ee41d89f11dd4d27a718f0..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_18:54:52_c5e66a40a44d3170101236830737a22424846e46/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.132992,0.0032,312.0 diff --git a/raw_results/2023-09-07_18:54:52_c5e66a40a44d3170101236830737a22424846e46/pytorch_bert_inference/0/main.log b/raw_results/2023-09-07_18:54:52_c5e66a40a44d3170101236830737a22424846e46/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
20ece9b62b69cde54a07d68579aecf344b9d28f9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_18:54:52_c5e66a40a44d3170101236830737a22424846e46/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-12 08:55:53,422][benchmark][INFO] - Configuring inference benchmark -[2023-09-12 08:55:53,423][benchmark][INFO] - + Setting seed(42) -[2023-09-12 08:55:55,184][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-12 08:55:55,184][backend][INFO] - Configuring pytorch backend -[2023-09-12 08:55:55,184][backend][INFO] - + Checking initial device isolation -[2023-09-12 08:55:55,184][backend][INFO] - + Checking contineous device isolation -[2023-09-12 08:55:55,184][pytorch][INFO] - + Disabling gradients -[2023-09-12 08:55:55,185][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-12 08:55:55,789][pytorch][INFO] - + Turning on eval mode -[2023-09-12 08:55:55,789][inference][INFO] - Running inference benchmark -[2023-09-12 08:55:55,905][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-12 08:55:55,907][inference][INFO] - + Tracking forward pass peak memory -[2023-09-12 08:55:55,968][inference][INFO] - + Forward pass peak memory: 466.132992 (MB) -[2023-09-12 08:55:55,969][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-12 08:55:55,971][inference][INFO] - + Warming up the forward pass -[2023-09-12 08:55:56,009][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-12 08:56:01,058][inference][INFO] - + Forward pass latency: 3.20e-03 (s) -[2023-09-12 08:56:01,059][inference][INFO] - + Forward pass throughput: 312.00 (samples/s) -[2023-09-12 08:56:01,059][inference][INFO] - Saving inference results -[2023-09-12 08:56:01,069][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-07_18:54:52_c5e66a40a44d3170101236830737a22424846e46/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-07_18:54:52_c5e66a40a44d3170101236830737a22424846e46/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 344ec2f935a31a19a62e706f12f4bb8e09c4c386..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_18:54:52_c5e66a40a44d3170101236830737a22424846e46/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-07_18:54:52_c5e66a40a44d3170101236830737a22424846e46/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-07_18:54:52_c5e66a40a44d3170101236830737a22424846e46/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 340e637d04bc285a71e4e76713916a058cb44f3e..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_18:54:52_c5e66a40a44d3170101236830737a22424846e46/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-07_18:54:52_c5e66a40a44d3170101236830737a22424846e46/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-07_18:54:52_c5e66a40a44d3170101236830737a22424846e46/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-09-07_18:54:52_c5e66a40a44d3170101236830737a22424846e46/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_18:54:52_c5e66a40a44d3170101236830737a22424846e46/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-07_18:54:52_c5e66a40a44d3170101236830737a22424846e46/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-07_18:54:52_c5e66a40a44d3170101236830737a22424846e46/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 9af0e66383a2862a3d036ea4917e3bacabb881ae..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_18:54:52_c5e66a40a44d3170101236830737a22424846e46/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-07_18:54:52_c5e66a40a44d3170101236830737a22424846e46/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-07_18:54:52_c5e66a40a44d3170101236830737a22424846e46/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 86a412f3a618a877ed91b404c5b7c5982c89a29c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_18:54:52_c5e66a40a44d3170101236830737a22424846e46/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.083264,0.00374,1070.0 diff --git a/raw_results/2023-09-07_18:54:52_c5e66a40a44d3170101236830737a22424846e46/pytorch_bert_inference/1/main.log b/raw_results/2023-09-07_18:54:52_c5e66a40a44d3170101236830737a22424846e46/pytorch_bert_inference/1/main.log deleted file mode 100644 index 4bd679c61e25e6d78aabb217475a8fa246e89694..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_18:54:52_c5e66a40a44d3170101236830737a22424846e46/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-12 08:56:01,434][benchmark][INFO] - Configuring inference benchmark -[2023-09-12 08:56:01,434][benchmark][INFO] - + Setting seed(42) -[2023-09-12 08:56:01,883][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-12 08:56:01,883][backend][INFO] - Configuring pytorch backend -[2023-09-12 08:56:01,883][backend][INFO] - + Checking initial device isolation -[2023-09-12 08:56:01,883][backend][INFO] - + Checking contineous device isolation -[2023-09-12 08:56:01,883][pytorch][INFO] - + Disabling gradients -[2023-09-12 08:56:01,884][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-12 08:56:01,991][pytorch][INFO] - + Turning on eval mode -[2023-09-12 08:56:01,991][inference][INFO] - Running inference benchmark -[2023-09-12 08:56:02,105][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-12 08:56:02,107][inference][INFO] - + Tracking forward pass 
peak memory -[2023-09-12 08:56:02,146][inference][INFO] - + Forward pass peak memory: 467.083264 (MB) -[2023-09-12 08:56:02,147][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-12 08:56:02,149][inference][INFO] - + Warming up the forward pass -[2023-09-12 08:56:02,184][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-12 08:56:07,229][inference][INFO] - + Forward pass latency: 3.74e-03 (s) -[2023-09-12 08:56:07,230][inference][INFO] - + Forward pass throughput: 1070.00 (samples/s) -[2023-09-12 08:56:07,230][inference][INFO] - Saving inference results -[2023-09-12 08:56:07,237][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-07_18:54:52_c5e66a40a44d3170101236830737a22424846e46/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-07_18:54:52_c5e66a40a44d3170101236830737a22424846e46/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 7e32a35f367f532a16d65c94e459c4f30c9188c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_18:54:52_c5e66a40a44d3170101236830737a22424846e46/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-07_18:54:52_c5e66a40a44d3170101236830737a22424846e46/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-07_18:54:52_c5e66a40a44d3170101236830737a22424846e46/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 5456de7b7420126d668f8e13ccde8e3ecd8193e2..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_18:54:52_c5e66a40a44d3170101236830737a22424846e46/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-07_18:54:52_c5e66a40a44d3170101236830737a22424846e46/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-07_18:54:52_c5e66a40a44d3170101236830737a22424846e46/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-09-07_18:54:52_c5e66a40a44d3170101236830737a22424846e46/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_18:54:52_c5e66a40a44d3170101236830737a22424846e46/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-07_18:54:52_c5e66a40a44d3170101236830737a22424846e46/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-07_18:54:52_c5e66a40a44d3170101236830737a22424846e46/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 5cf5f1ff54ffd6523449b409066f7697ce8772db..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_18:54:52_c5e66a40a44d3170101236830737a22424846e46/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-07_18:54:52_c5e66a40a44d3170101236830737a22424846e46/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-07_18:54:52_c5e66a40a44d3170101236830737a22424846e46/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index f75a6ad93cd6c70fec46440f707cbc71f313cfe1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_18:54:52_c5e66a40a44d3170101236830737a22424846e46/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.528576,0.00326,307.0,0.5,200.0 diff --git a/raw_results/2023-09-07_18:54:52_c5e66a40a44d3170101236830737a22424846e46/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-07_18:54:52_c5e66a40a44d3170101236830737a22424846e46/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index ca7da1d0e6386177ab64d875c8e2ce0b80e1797f..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-09-07_18:54:52_c5e66a40a44d3170101236830737a22424846e46/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-12 08:56:12,098][benchmark][INFO] - Configuring inference benchmark -[2023-09-12 08:56:12,099][benchmark][INFO] - + Setting seed(42) -[2023-09-12 08:56:13,474][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-12 08:56:13,474][backend][INFO] - Configuring pytorch backend -[2023-09-12 08:56:13,474][backend][INFO] - + Checking initial device isolation -[2023-09-12 08:56:13,475][backend][INFO] - + Checking contineous device isolation -[2023-09-12 08:56:13,475][pytorch][INFO] - + Disabling gradients -[2023-09-12 08:56:13,475][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-12 08:56:14,240][pytorch][INFO] - + Turning on eval mode -[2023-09-12 08:56:14,240][inference][INFO] - Running inference benchmark -[2023-09-12 08:56:14,434][inference][INFO] - + Tracking forward pass peak memory -[2023-09-12 08:56:14,479][inference][INFO] - + Forward pass peak memory: 469.528576 (MB) -[2023-09-12 08:56:14,480][inference][INFO] - + Warming up the forward pass -[2023-09-12 08:56:14,517][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-12 08:56:19,569][inference][INFO] - + Forward pass latency: 3.26e-03 (s) -[2023-09-12 08:56:19,571][inference][INFO] - + Forward pass throughput: 307.00 (samples/s) -[2023-09-12 08:56:19,571][inference][INFO] - + Warming up the generation pass -[2023-09-12 08:56:20,106][inference][INFO] - + Tracking generation latency and throughput -[2023-09-12 08:56:25,107][inference][INFO] - + Generation pass latency: 5.00e-01 (s) -[2023-09-12 08:56:25,108][inference][INFO] - + Generation pass throughput: 200.00 (tokens/s) -[2023-09-12 08:56:25,108][inference][INFO] - Saving inference results -[2023-09-12 08:56:25,119][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-07_19:00:22_fb7d246951d5f60aa36a7958841dfea72f51fc6b/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-07_19:00:22_fb7d246951d5f60aa36a7958841dfea72f51fc6b/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index d75847bed7772db4647c7b58e851f1868b48e619..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_19:00:22_fb7d246951d5f60aa36a7958841dfea72f51fc6b/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-07_19:00:22_fb7d246951d5f60aa36a7958841dfea72f51fc6b/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-07_19:00:22_fb7d246951d5f60aa36a7958841dfea72f51fc6b/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 5071a123c78f5ce1387e4a76d1e86629aaf897f9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_19:00:22_fb7d246951d5f60aa36a7958841dfea72f51fc6b/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-07_19:00:22_fb7d246951d5f60aa36a7958841dfea72f51fc6b/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-07_19:00:22_fb7d246951d5f60aa36a7958841dfea72f51fc6b/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-07_19:00:22_fb7d246951d5f60aa36a7958841dfea72f51fc6b/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_19:00:22_fb7d246951d5f60aa36a7958841dfea72f51fc6b/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-07_19:00:22_fb7d246951d5f60aa36a7958841dfea72f51fc6b/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-07_19:00:22_fb7d246951d5f60aa36a7958841dfea72f51fc6b/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 36a53864c4e26f9e38905bad2498ae02dc1b767f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_19:00:22_fb7d246951d5f60aa36a7958841dfea72f51fc6b/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-07_19:00:22_fb7d246951d5f60aa36a7958841dfea72f51fc6b/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-07_19:00:22_fb7d246951d5f60aa36a7958841dfea72f51fc6b/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index d629f2a37fbe68605f95aa7acacb6d8533d4bd7f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_19:00:22_fb7d246951d5f60aa36a7958841dfea72f51fc6b/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.61632,0.00367,272.0 diff --git a/raw_results/2023-09-07_19:00:22_fb7d246951d5f60aa36a7958841dfea72f51fc6b/pytorch_bert_inference/0/main.log b/raw_results/2023-09-07_19:00:22_fb7d246951d5f60aa36a7958841dfea72f51fc6b/pytorch_bert_inference/0/main.log deleted file mode 100644 index 2f7e1f132274962b06bea9b4d00331c4eb79be2c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_19:00:22_fb7d246951d5f60aa36a7958841dfea72f51fc6b/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-12 08:57:43,474][benchmark][INFO] - Configuring inference benchmark -[2023-09-12 08:57:43,475][benchmark][INFO] - + Setting seed(42) -[2023-09-12 08:57:44,756][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-12 08:57:44,756][backend][INFO] - Configuring pytorch backend -[2023-09-12 08:57:44,756][backend][INFO] - + Checking initial device isolation -[2023-09-12 08:57:44,757][backend][INFO] - + Checking contineous device isolation -[2023-09-12 08:57:44,757][pytorch][INFO] - + Disabling gradients -[2023-09-12 08:57:44,757][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-12 08:57:45,380][pytorch][INFO] - + Turning on eval mode -[2023-09-12 08:57:45,381][inference][INFO] - Running inference benchmark -[2023-09-12 08:57:45,500][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-12 08:57:45,502][inference][INFO] - + Tracking forward pass peak 
memory -[2023-09-12 08:57:45,560][inference][INFO] - + Forward pass peak memory: 466.61632 (MB) -[2023-09-12 08:57:45,561][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-12 08:57:45,563][inference][INFO] - + Warming up the forward pass -[2023-09-12 08:57:45,606][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-12 08:57:50,652][inference][INFO] - + Forward pass latency: 3.67e-03 (s) -[2023-09-12 08:57:50,653][inference][INFO] - + Forward pass throughput: 272.00 (samples/s) -[2023-09-12 08:57:50,653][inference][INFO] - Saving inference results -[2023-09-12 08:57:50,663][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-07_19:00:22_fb7d246951d5f60aa36a7958841dfea72f51fc6b/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-07_19:00:22_fb7d246951d5f60aa36a7958841dfea72f51fc6b/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 344ec2f935a31a19a62e706f12f4bb8e09c4c386..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_19:00:22_fb7d246951d5f60aa36a7958841dfea72f51fc6b/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-07_19:00:22_fb7d246951d5f60aa36a7958841dfea72f51fc6b/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-07_19:00:22_fb7d246951d5f60aa36a7958841dfea72f51fc6b/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 737e13616e5b5184f28a9c3cfac400085f6d2d67..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_19:00:22_fb7d246951d5f60aa36a7958841dfea72f51fc6b/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-07_19:00:22_fb7d246951d5f60aa36a7958841dfea72f51fc6b/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-07_19:00:22_fb7d246951d5f60aa36a7958841dfea72f51fc6b/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-09-07_19:00:22_fb7d246951d5f60aa36a7958841dfea72f51fc6b/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_19:00:22_fb7d246951d5f60aa36a7958841dfea72f51fc6b/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-07_19:00:22_fb7d246951d5f60aa36a7958841dfea72f51fc6b/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-07_19:00:22_fb7d246951d5f60aa36a7958841dfea72f51fc6b/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 9af0e66383a2862a3d036ea4917e3bacabb881ae..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_19:00:22_fb7d246951d5f60aa36a7958841dfea72f51fc6b/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-07_19:00:22_fb7d246951d5f60aa36a7958841dfea72f51fc6b/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-07_19:00:22_fb7d246951d5f60aa36a7958841dfea72f51fc6b/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 659860fe17ba17aa0ffeddaf9bbdc76eafc0f2eb..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_19:00:22_fb7d246951d5f60aa36a7958841dfea72f51fc6b/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.611648,0.00351,1140.0 diff --git a/raw_results/2023-09-07_19:00:22_fb7d246951d5f60aa36a7958841dfea72f51fc6b/pytorch_bert_inference/1/main.log b/raw_results/2023-09-07_19:00:22_fb7d246951d5f60aa36a7958841dfea72f51fc6b/pytorch_bert_inference/1/main.log deleted file mode 100644 index 9dbe22dcfd62882fa0cf4c3d6cc112833f103f87..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-09-07_19:00:22_fb7d246951d5f60aa36a7958841dfea72f51fc6b/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-12 08:57:51,027][benchmark][INFO] - Configuring inference benchmark -[2023-09-12 08:57:51,028][benchmark][INFO] - + Setting seed(42) -[2023-09-12 08:57:51,458][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-12 08:57:51,458][backend][INFO] - Configuring pytorch backend -[2023-09-12 08:57:51,458][backend][INFO] - + Checking initial device isolation -[2023-09-12 08:57:51,458][backend][INFO] - + Checking contineous device isolation -[2023-09-12 08:57:51,459][pytorch][INFO] - + Disabling gradients -[2023-09-12 08:57:51,459][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-12 08:57:51,571][pytorch][INFO] - + Turning on eval mode -[2023-09-12 08:57:51,572][inference][INFO] - Running inference benchmark -[2023-09-12 08:57:51,690][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-12 08:57:51,691][inference][INFO] - + Tracking forward pass peak memory -[2023-09-12 08:57:51,731][inference][INFO] - + Forward pass peak memory: 467.611648 (MB) -[2023-09-12 08:57:51,732][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-12 08:57:51,734][inference][INFO] - + Warming up the forward pass -[2023-09-12 08:57:51,769][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-12 08:57:56,813][inference][INFO] - + Forward pass latency: 3.51e-03 (s) -[2023-09-12 08:57:56,815][inference][INFO] - + Forward pass throughput: 1140.00 (samples/s) -[2023-09-12 08:57:56,815][inference][INFO] - Saving inference results -[2023-09-12 08:57:56,822][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-07_19:00:22_fb7d246951d5f60aa36a7958841dfea72f51fc6b/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-07_19:00:22_fb7d246951d5f60aa36a7958841dfea72f51fc6b/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 7e32a35f367f532a16d65c94e459c4f30c9188c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_19:00:22_fb7d246951d5f60aa36a7958841dfea72f51fc6b/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: 
hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-07_19:00:22_fb7d246951d5f60aa36a7958841dfea72f51fc6b/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-07_19:00:22_fb7d246951d5f60aa36a7958841dfea72f51fc6b/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 7b051148efc2c26e6a2e13f150c50cf1f45f0e8a..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_19:00:22_fb7d246951d5f60aa36a7958841dfea72f51fc6b/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-07_19:00:22_fb7d246951d5f60aa36a7958841dfea72f51fc6b/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-07_19:00:22_fb7d246951d5f60aa36a7958841dfea72f51fc6b/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-09-07_19:00:22_fb7d246951d5f60aa36a7958841dfea72f51fc6b/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_19:00:22_fb7d246951d5f60aa36a7958841dfea72f51fc6b/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-07_19:00:22_fb7d246951d5f60aa36a7958841dfea72f51fc6b/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-07_19:00:22_fb7d246951d5f60aa36a7958841dfea72f51fc6b/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 5cf5f1ff54ffd6523449b409066f7697ce8772db..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_19:00:22_fb7d246951d5f60aa36a7958841dfea72f51fc6b/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-07_19:00:22_fb7d246951d5f60aa36a7958841dfea72f51fc6b/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-07_19:00:22_fb7d246951d5f60aa36a7958841dfea72f51fc6b/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index f835ee753db2144102cfc45dcae62d917f68d859..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_19:00:22_fb7d246951d5f60aa36a7958841dfea72f51fc6b/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.21318399999996,0.0039,256.0,0.487,205.0 diff --git a/raw_results/2023-09-07_19:00:22_fb7d246951d5f60aa36a7958841dfea72f51fc6b/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-07_19:00:22_fb7d246951d5f60aa36a7958841dfea72f51fc6b/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index f58665b749fe93c34bc62aec2dd31690c1216404..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-07_19:00:22_fb7d246951d5f60aa36a7958841dfea72f51fc6b/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-12 08:58:01,947][benchmark][INFO] - Configuring inference benchmark -[2023-09-12 08:58:01,948][benchmark][INFO] - + Setting seed(42) -[2023-09-12 08:58:03,455][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-12 08:58:03,455][backend][INFO] - Configuring pytorch backend -[2023-09-12 08:58:03,455][backend][INFO] - + Checking initial device isolation -[2023-09-12 08:58:03,456][backend][INFO] - + Checking contineous device isolation -[2023-09-12 08:58:03,456][pytorch][INFO] - + Disabling gradients -[2023-09-12 08:58:03,456][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-12 08:58:04,080][pytorch][INFO] - + Turning on eval mode -[2023-09-12 08:58:04,081][inference][INFO] - Running inference benchmark -[2023-09-12 08:58:04,273][inference][INFO] - + Tracking forward pass peak memory -[2023-09-12 08:58:04,319][inference][INFO] - + Forward pass peak memory: 469.21318399999996 (MB) -[2023-09-12 08:58:04,320][inference][INFO] - + Warming up the 
forward pass -[2023-09-12 08:58:04,356][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-12 08:58:09,400][inference][INFO] - + Forward pass latency: 3.90e-03 (s) -[2023-09-12 08:58:09,401][inference][INFO] - + Forward pass throughput: 256.00 (samples/s) -[2023-09-12 08:58:09,402][inference][INFO] - + Warming up the generation pass -[2023-09-12 08:58:09,932][inference][INFO] - + Tracking generation latency and throughput -[2023-09-12 08:58:15,291][inference][INFO] - + Generation pass latency: 4.87e-01 (s) -[2023-09-12 08:58:15,292][inference][INFO] - + Generation pass throughput: 205.00 (tokens/s) -[2023-09-12 08:58:15,292][inference][INFO] - Saving inference results -[2023-09-12 08:58:15,303][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-08_11:49:39_0c67a72c9ab46996b0dc3175c80c1fee881bcc83/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-08_11:49:39_0c67a72c9ab46996b0dc3175c80c1fee881bcc83/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index d75847bed7772db4647c7b58e851f1868b48e619..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_11:49:39_0c67a72c9ab46996b0dc3175c80c1fee881bcc83/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-08_11:49:39_0c67a72c9ab46996b0dc3175c80c1fee881bcc83/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-08_11:49:39_0c67a72c9ab46996b0dc3175c80c1fee881bcc83/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index f967788a2276917f659abda137f2313d49570e35..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_11:49:39_0c67a72c9ab46996b0dc3175c80c1fee881bcc83/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-08_11:49:39_0c67a72c9ab46996b0dc3175c80c1fee881bcc83/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-09-08_11:49:39_0c67a72c9ab46996b0dc3175c80c1fee881bcc83/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-08_11:49:39_0c67a72c9ab46996b0dc3175c80c1fee881bcc83/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_11:49:39_0c67a72c9ab46996b0dc3175c80c1fee881bcc83/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-08_11:49:39_0c67a72c9ab46996b0dc3175c80c1fee881bcc83/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-08_11:49:39_0c67a72c9ab46996b0dc3175c80c1fee881bcc83/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 36a53864c4e26f9e38905bad2498ae02dc1b767f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_11:49:39_0c67a72c9ab46996b0dc3175c80c1fee881bcc83/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-08_11:49:39_0c67a72c9ab46996b0dc3175c80c1fee881bcc83/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-08_11:49:39_0c67a72c9ab46996b0dc3175c80c1fee881bcc83/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 5bd1623a8393eb84c01f775e5b5d10e5b00cf7e0..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_11:49:39_0c67a72c9ab46996b0dc3175c80c1fee881bcc83/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.21491199999997,0.0032,312.0 diff --git a/raw_results/2023-09-08_11:49:39_0c67a72c9ab46996b0dc3175c80c1fee881bcc83/pytorch_bert_inference/0/main.log b/raw_results/2023-09-08_11:49:39_0c67a72c9ab46996b0dc3175c80c1fee881bcc83/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
4dcd14aed1ab083a79b7b191cd4a7f5a27c8b34e..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_11:49:39_0c67a72c9ab46996b0dc3175c80c1fee881bcc83/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-12 08:59:33,192][benchmark][INFO] - Configuring inference benchmark -[2023-09-12 08:59:33,193][benchmark][INFO] - + Setting seed(42) -[2023-09-12 08:59:34,535][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-12 08:59:34,536][backend][INFO] - Configuring pytorch backend -[2023-09-12 08:59:34,536][backend][INFO] - + Checking initial device isolation -[2023-09-12 08:59:34,536][backend][INFO] - + Checking contineous device isolation -[2023-09-12 08:59:34,536][pytorch][INFO] - + Disabling gradients -[2023-09-12 08:59:34,536][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-12 08:59:35,128][pytorch][INFO] - + Turning on eval mode -[2023-09-12 08:59:35,128][inference][INFO] - Running inference benchmark -[2023-09-12 08:59:35,245][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-12 08:59:35,246][inference][INFO] - + Tracking forward pass peak memory -[2023-09-12 08:59:35,306][inference][INFO] - + Forward pass peak memory: 466.21491199999997 (MB) -[2023-09-12 08:59:35,307][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-12 08:59:35,309][inference][INFO] - + Warming up the forward pass -[2023-09-12 08:59:35,350][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-12 08:59:40,399][inference][INFO] - + Forward pass latency: 3.20e-03 (s) -[2023-09-12 08:59:40,401][inference][INFO] - + Forward pass throughput: 312.00 (samples/s) -[2023-09-12 08:59:40,401][inference][INFO] - Saving inference results -[2023-09-12 08:59:40,412][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-08_11:49:39_0c67a72c9ab46996b0dc3175c80c1fee881bcc83/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-08_11:49:39_0c67a72c9ab46996b0dc3175c80c1fee881bcc83/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 344ec2f935a31a19a62e706f12f4bb8e09c4c386..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_11:49:39_0c67a72c9ab46996b0dc3175c80c1fee881bcc83/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-08_11:49:39_0c67a72c9ab46996b0dc3175c80c1fee881bcc83/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-08_11:49:39_0c67a72c9ab46996b0dc3175c80c1fee881bcc83/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 4f408e2d2dbf963337867162f1b37001067f0a23..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_11:49:39_0c67a72c9ab46996b0dc3175c80c1fee881bcc83/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-08_11:49:39_0c67a72c9ab46996b0dc3175c80c1fee881bcc83/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-08_11:49:39_0c67a72c9ab46996b0dc3175c80c1fee881bcc83/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-09-08_11:49:39_0c67a72c9ab46996b0dc3175c80c1fee881bcc83/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_11:49:39_0c67a72c9ab46996b0dc3175c80c1fee881bcc83/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-08_11:49:39_0c67a72c9ab46996b0dc3175c80c1fee881bcc83/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-08_11:49:39_0c67a72c9ab46996b0dc3175c80c1fee881bcc83/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 9af0e66383a2862a3d036ea4917e3bacabb881ae..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_11:49:39_0c67a72c9ab46996b0dc3175c80c1fee881bcc83/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-08_11:49:39_0c67a72c9ab46996b0dc3175c80c1fee881bcc83/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-08_11:49:39_0c67a72c9ab46996b0dc3175c80c1fee881bcc83/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 1ed0d7182c4a4c7e00c3588e051b205032232b95..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_11:49:39_0c67a72c9ab46996b0dc3175c80c1fee881bcc83/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.255296,0.00347,1150.0 diff --git a/raw_results/2023-09-08_11:49:39_0c67a72c9ab46996b0dc3175c80c1fee881bcc83/pytorch_bert_inference/1/main.log b/raw_results/2023-09-08_11:49:39_0c67a72c9ab46996b0dc3175c80c1fee881bcc83/pytorch_bert_inference/1/main.log deleted file mode 100644 index eba4a1bb25c898b2cfd67a7df620b4a873f289b4..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_11:49:39_0c67a72c9ab46996b0dc3175c80c1fee881bcc83/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-12 08:59:40,775][benchmark][INFO] - Configuring inference benchmark -[2023-09-12 08:59:40,776][benchmark][INFO] - + Setting seed(42) -[2023-09-12 08:59:41,308][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-12 08:59:41,308][backend][INFO] - Configuring pytorch backend -[2023-09-12 08:59:41,309][backend][INFO] - + Checking initial device isolation -[2023-09-12 08:59:41,309][backend][INFO] - + Checking contineous device isolation -[2023-09-12 08:59:41,309][pytorch][INFO] - + Disabling gradients -[2023-09-12 08:59:41,309][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-12 08:59:41,428][pytorch][INFO] - + Turning on eval mode -[2023-09-12 08:59:41,429][inference][INFO] - Running inference benchmark -[2023-09-12 08:59:41,542][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-12 08:59:41,543][inference][INFO] - + Tracking forward pass 
peak memory -[2023-09-12 08:59:41,585][inference][INFO] - + Forward pass peak memory: 467.255296 (MB) -[2023-09-12 08:59:41,586][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-12 08:59:41,587][inference][INFO] - + Warming up the forward pass -[2023-09-12 08:59:41,622][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-12 08:59:46,667][inference][INFO] - + Forward pass latency: 3.47e-03 (s) -[2023-09-12 08:59:46,668][inference][INFO] - + Forward pass throughput: 1150.00 (samples/s) -[2023-09-12 08:59:46,669][inference][INFO] - Saving inference results -[2023-09-12 08:59:46,676][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-08_11:49:39_0c67a72c9ab46996b0dc3175c80c1fee881bcc83/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-08_11:49:39_0c67a72c9ab46996b0dc3175c80c1fee881bcc83/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 7e32a35f367f532a16d65c94e459c4f30c9188c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_11:49:39_0c67a72c9ab46996b0dc3175c80c1fee881bcc83/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-08_11:49:39_0c67a72c9ab46996b0dc3175c80c1fee881bcc83/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-08_11:49:39_0c67a72c9ab46996b0dc3175c80c1fee881bcc83/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 6bc88d1f134a581fa01c012a624a6dbd9ffa4aab..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_11:49:39_0c67a72c9ab46996b0dc3175c80c1fee881bcc83/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-08_11:49:39_0c67a72c9ab46996b0dc3175c80c1fee881bcc83/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-08_11:49:39_0c67a72c9ab46996b0dc3175c80c1fee881bcc83/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-09-08_11:49:39_0c67a72c9ab46996b0dc3175c80c1fee881bcc83/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_11:49:39_0c67a72c9ab46996b0dc3175c80c1fee881bcc83/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-08_11:49:39_0c67a72c9ab46996b0dc3175c80c1fee881bcc83/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-08_11:49:39_0c67a72c9ab46996b0dc3175c80c1fee881bcc83/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 5cf5f1ff54ffd6523449b409066f7697ce8772db..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_11:49:39_0c67a72c9ab46996b0dc3175c80c1fee881bcc83/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-08_11:49:39_0c67a72c9ab46996b0dc3175c80c1fee881bcc83/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-08_11:49:39_0c67a72c9ab46996b0dc3175c80c1fee881bcc83/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 93872b9b73c51e139fe55eb76efef875e5146b59..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_11:49:39_0c67a72c9ab46996b0dc3175c80c1fee881bcc83/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.28281599999997,0.00342,292.0,0.488,205.0 diff --git a/raw_results/2023-09-08_11:49:39_0c67a72c9ab46996b0dc3175c80c1fee881bcc83/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-08_11:49:39_0c67a72c9ab46996b0dc3175c80c1fee881bcc83/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 01849f9e0e4aae5b3ec55890ac7e4244c223c4ae..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-09-08_11:49:39_0c67a72c9ab46996b0dc3175c80c1fee881bcc83/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-12 08:59:51,529][benchmark][INFO] - Configuring inference benchmark -[2023-09-12 08:59:51,529][benchmark][INFO] - + Setting seed(42) -[2023-09-12 08:59:53,007][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-12 08:59:53,007][backend][INFO] - Configuring pytorch backend -[2023-09-12 08:59:53,007][backend][INFO] - + Checking initial device isolation -[2023-09-12 08:59:53,007][backend][INFO] - + Checking contineous device isolation -[2023-09-12 08:59:53,008][pytorch][INFO] - + Disabling gradients -[2023-09-12 08:59:53,008][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-12 08:59:53,637][pytorch][INFO] - + Turning on eval mode -[2023-09-12 08:59:53,637][inference][INFO] - Running inference benchmark -[2023-09-12 08:59:53,839][inference][INFO] - + Tracking forward pass peak memory -[2023-09-12 08:59:53,884][inference][INFO] - + Forward pass peak memory: 469.28281599999997 (MB) -[2023-09-12 08:59:53,885][inference][INFO] - + Warming up the forward pass -[2023-09-12 08:59:53,921][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-12 08:59:58,970][inference][INFO] - + Forward pass latency: 3.42e-03 (s) -[2023-09-12 08:59:58,972][inference][INFO] - + Forward pass throughput: 292.00 (samples/s) -[2023-09-12 08:59:58,973][inference][INFO] - + Warming up the generation pass -[2023-09-12 08:59:59,478][inference][INFO] - + Tracking generation latency and throughput -[2023-09-12 09:00:04,851][inference][INFO] - + Generation pass latency: 4.88e-01 (s) -[2023-09-12 09:00:04,853][inference][INFO] - + Generation pass throughput: 205.00 (tokens/s) -[2023-09-12 09:00:04,853][inference][INFO] - Saving inference results -[2023-09-12 09:00:04,864][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-08_13:51:54_18ee1fe76295239335bf1528c744fe1cfba21cc8/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-08_13:51:54_18ee1fe76295239335bf1528c744fe1cfba21cc8/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index d75847bed7772db4647c7b58e851f1868b48e619..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_13:51:54_18ee1fe76295239335bf1528c744fe1cfba21cc8/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-08_13:51:54_18ee1fe76295239335bf1528c744fe1cfba21cc8/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-08_13:51:54_18ee1fe76295239335bf1528c744fe1cfba21cc8/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index bd2bf7d6f258ee16ed7551f8c15b2e13a299756c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_13:51:54_18ee1fe76295239335bf1528c744fe1cfba21cc8/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-08_13:51:54_18ee1fe76295239335bf1528c744fe1cfba21cc8/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-08_13:51:54_18ee1fe76295239335bf1528c744fe1cfba21cc8/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-08_13:51:54_18ee1fe76295239335bf1528c744fe1cfba21cc8/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_13:51:54_18ee1fe76295239335bf1528c744fe1cfba21cc8/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-08_13:51:54_18ee1fe76295239335bf1528c744fe1cfba21cc8/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-08_13:51:54_18ee1fe76295239335bf1528c744fe1cfba21cc8/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 36a53864c4e26f9e38905bad2498ae02dc1b767f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_13:51:54_18ee1fe76295239335bf1528c744fe1cfba21cc8/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-08_13:51:54_18ee1fe76295239335bf1528c744fe1cfba21cc8/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-08_13:51:54_18ee1fe76295239335bf1528c744fe1cfba21cc8/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 16eccb639f0e3bab621542de4594aa6de8e1c5d6..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_13:51:54_18ee1fe76295239335bf1528c744fe1cfba21cc8/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,466.628608,0.00398,251.0 diff --git a/raw_results/2023-09-08_13:51:54_18ee1fe76295239335bf1528c744fe1cfba21cc8/pytorch_bert_inference/0/main.log b/raw_results/2023-09-08_13:51:54_18ee1fe76295239335bf1528c744fe1cfba21cc8/pytorch_bert_inference/0/main.log deleted file mode 100644 index 151dd097b43af6f6a56e191ea88f11bd9abc0910..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_13:51:54_18ee1fe76295239335bf1528c744fe1cfba21cc8/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-12 09:01:23,191][benchmark][INFO] - Configuring inference benchmark -[2023-09-12 09:01:23,192][benchmark][INFO] - + Setting seed(42) -[2023-09-12 09:01:24,370][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-12 09:01:24,370][backend][INFO] - Configuring pytorch backend -[2023-09-12 09:01:24,370][backend][INFO] - + Checking initial device isolation -[2023-09-12 09:01:24,370][backend][INFO] - + Checking contineous device isolation -[2023-09-12 09:01:24,370][pytorch][INFO] - + Disabling gradients -[2023-09-12 09:01:24,370][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-12 09:01:25,003][pytorch][INFO] - + Turning on eval mode -[2023-09-12 09:01:25,004][inference][INFO] - Running inference benchmark -[2023-09-12 09:01:25,245][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-12 09:01:25,246][inference][INFO] - + Tracking forward pass peak 
memory -[2023-09-12 09:01:25,304][inference][INFO] - + Forward pass peak memory: 466.628608 (MB) -[2023-09-12 09:01:25,305][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-12 09:01:25,307][inference][INFO] - + Warming up the forward pass -[2023-09-12 09:01:25,348][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-12 09:01:30,396][inference][INFO] - + Forward pass latency: 3.98e-03 (s) -[2023-09-12 09:01:30,398][inference][INFO] - + Forward pass throughput: 251.00 (samples/s) -[2023-09-12 09:01:30,398][inference][INFO] - Saving inference results -[2023-09-12 09:01:30,412][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-08_13:51:54_18ee1fe76295239335bf1528c744fe1cfba21cc8/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-08_13:51:54_18ee1fe76295239335bf1528c744fe1cfba21cc8/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 344ec2f935a31a19a62e706f12f4bb8e09c4c386..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_13:51:54_18ee1fe76295239335bf1528c744fe1cfba21cc8/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-08_13:51:54_18ee1fe76295239335bf1528c744fe1cfba21cc8/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-08_13:51:54_18ee1fe76295239335bf1528c744fe1cfba21cc8/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index eb377c0fd0676a722823699fa494fa29f291d725..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_13:51:54_18ee1fe76295239335bf1528c744fe1cfba21cc8/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-08_13:51:54_18ee1fe76295239335bf1528c744fe1cfba21cc8/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-08_13:51:54_18ee1fe76295239335bf1528c744fe1cfba21cc8/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-09-08_13:51:54_18ee1fe76295239335bf1528c744fe1cfba21cc8/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_13:51:54_18ee1fe76295239335bf1528c744fe1cfba21cc8/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-08_13:51:54_18ee1fe76295239335bf1528c744fe1cfba21cc8/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-08_13:51:54_18ee1fe76295239335bf1528c744fe1cfba21cc8/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 9af0e66383a2862a3d036ea4917e3bacabb881ae..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_13:51:54_18ee1fe76295239335bf1528c744fe1cfba21cc8/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-08_13:51:54_18ee1fe76295239335bf1528c744fe1cfba21cc8/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-08_13:51:54_18ee1fe76295239335bf1528c744fe1cfba21cc8/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 9c28eafcb0fed89f8491467c82992b21eb8c7ca3..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_13:51:54_18ee1fe76295239335bf1528c744fe1cfba21cc8/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,467.57478399999997,0.00453,883.0 diff --git a/raw_results/2023-09-08_13:51:54_18ee1fe76295239335bf1528c744fe1cfba21cc8/pytorch_bert_inference/1/main.log b/raw_results/2023-09-08_13:51:54_18ee1fe76295239335bf1528c744fe1cfba21cc8/pytorch_bert_inference/1/main.log deleted file mode 100644 index 0f2e6a59e29589279347a7764da437dde4d66e1b..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-09-08_13:51:54_18ee1fe76295239335bf1528c744fe1cfba21cc8/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-12 09:01:30,805][benchmark][INFO] - Configuring inference benchmark -[2023-09-12 09:01:30,806][benchmark][INFO] - + Setting seed(42) -[2023-09-12 09:01:31,263][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-12 09:01:31,263][backend][INFO] - Configuring pytorch backend -[2023-09-12 09:01:31,263][backend][INFO] - + Checking initial device isolation -[2023-09-12 09:01:31,263][backend][INFO] - + Checking contineous device isolation -[2023-09-12 09:01:31,263][pytorch][INFO] - + Disabling gradients -[2023-09-12 09:01:31,264][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-12 09:01:31,379][pytorch][INFO] - + Turning on eval mode -[2023-09-12 09:01:31,379][inference][INFO] - Running inference benchmark -[2023-09-12 09:01:31,499][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-12 09:01:31,501][inference][INFO] - + Tracking forward pass peak memory -[2023-09-12 09:01:31,542][inference][INFO] - + Forward pass peak memory: 467.57478399999997 (MB) -[2023-09-12 09:01:31,543][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-12 09:01:31,545][inference][INFO] - + Warming up the forward pass -[2023-09-12 09:01:31,587][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-12 09:01:36,627][inference][INFO] - + Forward pass latency: 4.53e-03 (s) -[2023-09-12 09:01:36,628][inference][INFO] - + Forward pass throughput: 883.00 (samples/s) -[2023-09-12 09:01:36,629][inference][INFO] - Saving inference results -[2023-09-12 09:01:36,636][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-08_13:51:54_18ee1fe76295239335bf1528c744fe1cfba21cc8/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-08_13:51:54_18ee1fe76295239335bf1528c744fe1cfba21cc8/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 7e32a35f367f532a16d65c94e459c4f30c9188c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_13:51:54_18ee1fe76295239335bf1528c744fe1cfba21cc8/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference 
-model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-08_13:51:54_18ee1fe76295239335bf1528c744fe1cfba21cc8/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-08_13:51:54_18ee1fe76295239335bf1528c744fe1cfba21cc8/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 69a30f788f2e87ffe690b373cd3bcb38c60a7da4..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_13:51:54_18ee1fe76295239335bf1528c744fe1cfba21cc8/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-08_13:51:54_18ee1fe76295239335bf1528c744fe1cfba21cc8/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-08_13:51:54_18ee1fe76295239335bf1528c744fe1cfba21cc8/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-09-08_13:51:54_18ee1fe76295239335bf1528c744fe1cfba21cc8/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_13:51:54_18ee1fe76295239335bf1528c744fe1cfba21cc8/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-08_13:51:54_18ee1fe76295239335bf1528c744fe1cfba21cc8/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-08_13:51:54_18ee1fe76295239335bf1528c744fe1cfba21cc8/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 5cf5f1ff54ffd6523449b409066f7697ce8772db..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_13:51:54_18ee1fe76295239335bf1528c744fe1cfba21cc8/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-08_13:51:54_18ee1fe76295239335bf1528c744fe1cfba21cc8/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-08_13:51:54_18ee1fe76295239335bf1528c744fe1cfba21cc8/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 7a08ed362d2d667cdd1f81989b49d0b6ee9420c2..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_13:51:54_18ee1fe76295239335bf1528c744fe1cfba21cc8/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,469.44665599999996,0.00383,261.0,0.547,183.0 diff --git a/raw_results/2023-09-08_13:51:54_18ee1fe76295239335bf1528c744fe1cfba21cc8/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-08_13:51:54_18ee1fe76295239335bf1528c744fe1cfba21cc8/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 3d720d3071b61e918b60664f26c88a28e851f414..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_13:51:54_18ee1fe76295239335bf1528c744fe1cfba21cc8/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-12 09:01:41,576][benchmark][INFO] - Configuring inference benchmark -[2023-09-12 09:01:41,577][benchmark][INFO] - + Setting seed(42) -[2023-09-12 09:01:43,019][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-12 09:01:43,019][backend][INFO] - Configuring pytorch backend -[2023-09-12 09:01:43,020][backend][INFO] - + Checking initial device isolation -[2023-09-12 09:01:43,020][backend][INFO] - + Checking contineous device isolation -[2023-09-12 09:01:43,020][pytorch][INFO] - + Disabling gradients -[2023-09-12 09:01:43,020][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-12 09:01:43,671][pytorch][INFO] - + Turning on eval mode -[2023-09-12 09:01:43,671][inference][INFO] - Running inference benchmark -[2023-09-12 09:01:43,857][inference][INFO] - + Tracking forward pass peak memory -[2023-09-12 09:01:43,904][inference][INFO] - + Forward pass peak memory: 469.44665599999996 (MB) -[2023-09-12 09:01:43,906][inference][INFO] - + Warming up the 
forward pass -[2023-09-12 09:01:43,941][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-12 09:01:48,988][inference][INFO] - + Forward pass latency: 3.83e-03 (s) -[2023-09-12 09:01:48,990][inference][INFO] - + Forward pass throughput: 261.00 (samples/s) -[2023-09-12 09:01:48,990][inference][INFO] - + Warming up the generation pass -[2023-09-12 09:01:49,577][inference][INFO] - + Tracking generation latency and throughput -[2023-09-12 09:01:55,048][inference][INFO] - + Generation pass latency: 5.47e-01 (s) -[2023-09-12 09:01:55,049][inference][INFO] - + Generation pass throughput: 183.00 (tokens/s) -[2023-09-12 09:01:55,049][inference][INFO] - Saving inference results -[2023-09-12 09:01:55,066][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-08_19:13:33_6c26faa159b79a42d7fa46cb66e2d21523351987/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-08_19:13:33_6c26faa159b79a42d7fa46cb66e2d21523351987/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index d75847bed7772db4647c7b58e851f1868b48e619..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_19:13:33_6c26faa159b79a42d7fa46cb66e2d21523351987/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-08_19:13:33_6c26faa159b79a42d7fa46cb66e2d21523351987/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-08_19:13:33_6c26faa159b79a42d7fa46cb66e2d21523351987/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 14b63324788ca98865b5add3b184cf8c1427545b..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_19:13:33_6c26faa159b79a42d7fa46cb66e2d21523351987/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-08_19:13:33_6c26faa159b79a42d7fa46cb66e2d21523351987/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-09-08_19:13:33_6c26faa159b79a42d7fa46cb66e2d21523351987/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-08_19:13:33_6c26faa159b79a42d7fa46cb66e2d21523351987/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_19:13:33_6c26faa159b79a42d7fa46cb66e2d21523351987/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-08_19:13:33_6c26faa159b79a42d7fa46cb66e2d21523351987/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-08_19:13:33_6c26faa159b79a42d7fa46cb66e2d21523351987/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 36a53864c4e26f9e38905bad2498ae02dc1b767f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_19:13:33_6c26faa159b79a42d7fa46cb66e2d21523351987/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-08_19:13:33_6c26faa159b79a42d7fa46cb66e2d21523351987/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-08_19:13:33_6c26faa159b79a42d7fa46cb66e2d21523351987/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 73b5dd0e4a8fe682d12f97f6654140fec3de86f2..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_19:13:33_6c26faa159b79a42d7fa46cb66e2d21523351987/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,470.97446399999995,0.0031,323.0 diff --git a/raw_results/2023-09-08_19:13:33_6c26faa159b79a42d7fa46cb66e2d21523351987/pytorch_bert_inference/0/main.log b/raw_results/2023-09-08_19:13:33_6c26faa159b79a42d7fa46cb66e2d21523351987/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
a9e078590c54c2f3ccf0661c8af13f70960eb57c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_19:13:33_6c26faa159b79a42d7fa46cb66e2d21523351987/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-12 09:03:12,757][benchmark][INFO] - Configuring inference benchmark -[2023-09-12 09:03:12,757][benchmark][INFO] - + Setting seed(42) -[2023-09-12 09:03:13,973][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-12 09:03:13,973][backend][INFO] - Configuring pytorch backend -[2023-09-12 09:03:13,973][backend][INFO] - + Checking initial device isolation -[2023-09-12 09:03:13,973][backend][INFO] - + Checking contineous device isolation -[2023-09-12 09:03:13,974][pytorch][INFO] - + Disabling gradients -[2023-09-12 09:03:13,974][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-12 09:03:14,577][pytorch][INFO] - + Turning on eval mode -[2023-09-12 09:03:14,578][inference][INFO] - Running inference benchmark -[2023-09-12 09:03:14,696][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-12 09:03:14,698][inference][INFO] - + Tracking forward pass peak memory -[2023-09-12 09:03:14,909][inference][INFO] - + Forward pass peak memory: 470.97446399999995 (MB) -[2023-09-12 09:03:14,910][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-12 09:03:14,912][inference][INFO] - + Warming up the forward pass -[2023-09-12 09:03:14,944][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-12 09:03:19,994][inference][INFO] - + Forward pass latency: 3.10e-03 (s) -[2023-09-12 09:03:19,995][inference][INFO] - + Forward pass throughput: 323.00 (samples/s) -[2023-09-12 09:03:19,995][inference][INFO] - Saving inference results -[2023-09-12 09:03:20,008][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-08_19:13:33_6c26faa159b79a42d7fa46cb66e2d21523351987/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-08_19:13:33_6c26faa159b79a42d7fa46cb66e2d21523351987/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 344ec2f935a31a19a62e706f12f4bb8e09c4c386..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_19:13:33_6c26faa159b79a42d7fa46cb66e2d21523351987/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-08_19:13:33_6c26faa159b79a42d7fa46cb66e2d21523351987/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-08_19:13:33_6c26faa159b79a42d7fa46cb66e2d21523351987/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 47f05f4fc06107c20dc438fdd8b3f6f68d2dde08..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_19:13:33_6c26faa159b79a42d7fa46cb66e2d21523351987/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-08_19:13:33_6c26faa159b79a42d7fa46cb66e2d21523351987/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-08_19:13:33_6c26faa159b79a42d7fa46cb66e2d21523351987/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-09-08_19:13:33_6c26faa159b79a42d7fa46cb66e2d21523351987/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_19:13:33_6c26faa159b79a42d7fa46cb66e2d21523351987/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-08_19:13:33_6c26faa159b79a42d7fa46cb66e2d21523351987/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-08_19:13:33_6c26faa159b79a42d7fa46cb66e2d21523351987/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 9af0e66383a2862a3d036ea4917e3bacabb881ae..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_19:13:33_6c26faa159b79a42d7fa46cb66e2d21523351987/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-08_19:13:33_6c26faa159b79a42d7fa46cb66e2d21523351987/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-08_19:13:33_6c26faa159b79a42d7fa46cb66e2d21523351987/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index f5f4f9d17a75bd4c79925a161129b873a3fd7ed0..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_19:13:33_6c26faa159b79a42d7fa46cb66e2d21523351987/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,473.227264,0.00413,969.0 diff --git a/raw_results/2023-09-08_19:13:33_6c26faa159b79a42d7fa46cb66e2d21523351987/pytorch_bert_inference/1/main.log b/raw_results/2023-09-08_19:13:33_6c26faa159b79a42d7fa46cb66e2d21523351987/pytorch_bert_inference/1/main.log deleted file mode 100644 index 08b97c86164d515bb597d7e3d95bef00c86dedc2..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_19:13:33_6c26faa159b79a42d7fa46cb66e2d21523351987/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-12 09:03:20,388][benchmark][INFO] - Configuring inference benchmark -[2023-09-12 09:03:20,388][benchmark][INFO] - + Setting seed(42) -[2023-09-12 09:03:20,822][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-12 09:03:20,822][backend][INFO] - Configuring pytorch backend -[2023-09-12 09:03:20,822][backend][INFO] - + Checking initial device isolation -[2023-09-12 09:03:20,822][backend][INFO] - + Checking contineous device isolation -[2023-09-12 09:03:20,822][pytorch][INFO] - + Disabling gradients -[2023-09-12 09:03:20,823][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-12 09:03:20,948][pytorch][INFO] - + Turning on eval mode -[2023-09-12 09:03:20,949][inference][INFO] - Running inference benchmark -[2023-09-12 09:03:21,076][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-12 09:03:21,077][inference][INFO] - + Tracking forward pass peak 
memory -[2023-09-12 09:03:21,119][inference][INFO] - + Forward pass peak memory: 473.227264 (MB) -[2023-09-12 09:03:21,120][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-12 09:03:21,122][inference][INFO] - + Warming up the forward pass -[2023-09-12 09:03:21,170][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-12 09:03:26,212][inference][INFO] - + Forward pass latency: 4.13e-03 (s) -[2023-09-12 09:03:26,213][inference][INFO] - + Forward pass throughput: 969.00 (samples/s) -[2023-09-12 09:03:26,213][inference][INFO] - Saving inference results -[2023-09-12 09:03:26,222][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-08_19:13:33_6c26faa159b79a42d7fa46cb66e2d21523351987/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-08_19:13:33_6c26faa159b79a42d7fa46cb66e2d21523351987/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 7e32a35f367f532a16d65c94e459c4f30c9188c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_19:13:33_6c26faa159b79a42d7fa46cb66e2d21523351987/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-08_19:13:33_6c26faa159b79a42d7fa46cb66e2d21523351987/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-08_19:13:33_6c26faa159b79a42d7fa46cb66e2d21523351987/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 79caae3606e8b65feb7b1500f781e88e6c4224dc..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_19:13:33_6c26faa159b79a42d7fa46cb66e2d21523351987/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-08_19:13:33_6c26faa159b79a42d7fa46cb66e2d21523351987/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-08_19:13:33_6c26faa159b79a42d7fa46cb66e2d21523351987/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-09-08_19:13:33_6c26faa159b79a42d7fa46cb66e2d21523351987/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_19:13:33_6c26faa159b79a42d7fa46cb66e2d21523351987/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-08_19:13:33_6c26faa159b79a42d7fa46cb66e2d21523351987/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-08_19:13:33_6c26faa159b79a42d7fa46cb66e2d21523351987/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 5cf5f1ff54ffd6523449b409066f7697ce8772db..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_19:13:33_6c26faa159b79a42d7fa46cb66e2d21523351987/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-08_19:13:33_6c26faa159b79a42d7fa46cb66e2d21523351987/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-08_19:13:33_6c26faa159b79a42d7fa46cb66e2d21523351987/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 3388bdd9aabe2ceb9973c52acb54474c4cbd65c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_19:13:33_6c26faa159b79a42d7fa46cb66e2d21523351987/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,471.916544,0.00312,321.0,0.489,204.0 diff --git a/raw_results/2023-09-08_19:13:33_6c26faa159b79a42d7fa46cb66e2d21523351987/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-08_19:13:33_6c26faa159b79a42d7fa46cb66e2d21523351987/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 5ac24c552c6fdb36eeead0aaac3767500f61003a..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-09-08_19:13:33_6c26faa159b79a42d7fa46cb66e2d21523351987/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-12 09:03:31,166][benchmark][INFO] - Configuring inference benchmark -[2023-09-12 09:03:31,167][benchmark][INFO] - + Setting seed(42) -[2023-09-12 09:03:33,169][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-12 09:03:33,169][backend][INFO] - Configuring pytorch backend -[2023-09-12 09:03:33,169][backend][INFO] - + Checking initial device isolation -[2023-09-12 09:03:33,169][backend][INFO] - + Checking contineous device isolation -[2023-09-12 09:03:33,170][pytorch][INFO] - + Disabling gradients -[2023-09-12 09:03:33,170][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-12 09:03:33,819][pytorch][INFO] - + Turning on eval mode -[2023-09-12 09:03:33,820][inference][INFO] - Running inference benchmark -[2023-09-12 09:03:34,014][inference][INFO] - + Tracking forward pass peak memory -[2023-09-12 09:03:34,217][inference][INFO] - + Forward pass peak memory: 471.916544 (MB) -[2023-09-12 09:03:34,219][inference][INFO] - + Warming up the forward pass -[2023-09-12 09:03:34,256][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-12 09:03:39,307][inference][INFO] - + Forward pass latency: 3.12e-03 (s) -[2023-09-12 09:03:39,310][inference][INFO] - + Forward pass throughput: 321.00 (samples/s) -[2023-09-12 09:03:39,311][inference][INFO] - + Warming up the generation pass -[2023-09-12 09:03:39,800][inference][INFO] - + Tracking generation latency and throughput -[2023-09-12 09:03:45,182][inference][INFO] - + Generation pass latency: 4.89e-01 (s) -[2023-09-12 09:03:45,183][inference][INFO] - + Generation pass throughput: 204.00 (tokens/s) -[2023-09-12 09:03:45,183][inference][INFO] - Saving inference results -[2023-09-12 09:03:45,207][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-08_19:38:41_d53606031fdf59a6d25a18bd743d77846d0ea22a/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-08_19:38:41_d53606031fdf59a6d25a18bd743d77846d0ea22a/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index d75847bed7772db4647c7b58e851f1868b48e619..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_19:38:41_d53606031fdf59a6d25a18bd743d77846d0ea22a/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-08_19:38:41_d53606031fdf59a6d25a18bd743d77846d0ea22a/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-08_19:38:41_d53606031fdf59a6d25a18bd743d77846d0ea22a/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index c473dda04475d85b7fc7661734f60c29eafed422..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_19:38:41_d53606031fdf59a6d25a18bd743d77846d0ea22a/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-08_19:38:41_d53606031fdf59a6d25a18bd743d77846d0ea22a/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-08_19:38:41_d53606031fdf59a6d25a18bd743d77846d0ea22a/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-08_19:38:41_d53606031fdf59a6d25a18bd743d77846d0ea22a/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_19:38:41_d53606031fdf59a6d25a18bd743d77846d0ea22a/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-08_19:38:41_d53606031fdf59a6d25a18bd743d77846d0ea22a/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-08_19:38:41_d53606031fdf59a6d25a18bd743d77846d0ea22a/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 36a53864c4e26f9e38905bad2498ae02dc1b767f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_19:38:41_d53606031fdf59a6d25a18bd743d77846d0ea22a/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-08_19:38:41_d53606031fdf59a6d25a18bd743d77846d0ea22a/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-08_19:38:41_d53606031fdf59a6d25a18bd743d77846d0ea22a/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 242e542a24a0ca95dce83962036931f30f35615a..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_19:38:41_d53606031fdf59a6d25a18bd743d77846d0ea22a/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,471.621632,0.00335,299.0 diff --git a/raw_results/2023-09-08_19:38:41_d53606031fdf59a6d25a18bd743d77846d0ea22a/pytorch_bert_inference/0/main.log b/raw_results/2023-09-08_19:38:41_d53606031fdf59a6d25a18bd743d77846d0ea22a/pytorch_bert_inference/0/main.log deleted file mode 100644 index bf0503c6923af57ea60d3f30395c17287c73ddec..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_19:38:41_d53606031fdf59a6d25a18bd743d77846d0ea22a/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-12 09:05:02,869][benchmark][INFO] - Configuring inference benchmark -[2023-09-12 09:05:02,870][benchmark][INFO] - + Setting seed(42) -[2023-09-12 09:05:04,066][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-12 09:05:04,067][backend][INFO] - Configuring pytorch backend -[2023-09-12 09:05:04,067][backend][INFO] - + Checking initial device isolation -[2023-09-12 09:05:04,067][backend][INFO] - + Checking contineous device isolation -[2023-09-12 09:05:04,067][pytorch][INFO] - + Disabling gradients -[2023-09-12 09:05:04,067][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-12 09:05:04,956][pytorch][INFO] - + Turning on eval mode -[2023-09-12 09:05:04,956][inference][INFO] - Running inference benchmark -[2023-09-12 09:05:05,074][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-12 09:05:05,076][inference][INFO] - + Tracking forward pass peak 
memory -[2023-09-12 09:05:05,294][inference][INFO] - + Forward pass peak memory: 471.621632 (MB) -[2023-09-12 09:05:05,295][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-12 09:05:05,297][inference][INFO] - + Warming up the forward pass -[2023-09-12 09:05:05,334][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-12 09:05:10,381][inference][INFO] - + Forward pass latency: 3.35e-03 (s) -[2023-09-12 09:05:10,383][inference][INFO] - + Forward pass throughput: 299.00 (samples/s) -[2023-09-12 09:05:10,383][inference][INFO] - Saving inference results -[2023-09-12 09:05:10,395][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-08_19:38:41_d53606031fdf59a6d25a18bd743d77846d0ea22a/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-08_19:38:41_d53606031fdf59a6d25a18bd743d77846d0ea22a/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 344ec2f935a31a19a62e706f12f4bb8e09c4c386..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_19:38:41_d53606031fdf59a6d25a18bd743d77846d0ea22a/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-08_19:38:41_d53606031fdf59a6d25a18bd743d77846d0ea22a/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-08_19:38:41_d53606031fdf59a6d25a18bd743d77846d0ea22a/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 695fbad5194d22cb09b53b5e9a79e3c119095feb..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_19:38:41_d53606031fdf59a6d25a18bd743d77846d0ea22a/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-08_19:38:41_d53606031fdf59a6d25a18bd743d77846d0ea22a/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-08_19:38:41_d53606031fdf59a6d25a18bd743d77846d0ea22a/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-09-08_19:38:41_d53606031fdf59a6d25a18bd743d77846d0ea22a/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_19:38:41_d53606031fdf59a6d25a18bd743d77846d0ea22a/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-08_19:38:41_d53606031fdf59a6d25a18bd743d77846d0ea22a/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-08_19:38:41_d53606031fdf59a6d25a18bd743d77846d0ea22a/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 9af0e66383a2862a3d036ea4917e3bacabb881ae..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_19:38:41_d53606031fdf59a6d25a18bd743d77846d0ea22a/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-08_19:38:41_d53606031fdf59a6d25a18bd743d77846d0ea22a/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-08_19:38:41_d53606031fdf59a6d25a18bd743d77846d0ea22a/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index d6afd3146858d1608562a0ff82c90ef5e1f726c0..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_19:38:41_d53606031fdf59a6d25a18bd743d77846d0ea22a/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,473.75564799999995,0.00349,1150.0 diff --git a/raw_results/2023-09-08_19:38:41_d53606031fdf59a6d25a18bd743d77846d0ea22a/pytorch_bert_inference/1/main.log b/raw_results/2023-09-08_19:38:41_d53606031fdf59a6d25a18bd743d77846d0ea22a/pytorch_bert_inference/1/main.log deleted file mode 100644 index 1f37da302d753a3a5b7f7b3a8c179b369a8ed842..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-09-08_19:38:41_d53606031fdf59a6d25a18bd743d77846d0ea22a/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-12 09:05:10,783][benchmark][INFO] - Configuring inference benchmark -[2023-09-12 09:05:10,784][benchmark][INFO] - + Setting seed(42) -[2023-09-12 09:05:11,206][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-12 09:05:11,207][backend][INFO] - Configuring pytorch backend -[2023-09-12 09:05:11,207][backend][INFO] - + Checking initial device isolation -[2023-09-12 09:05:11,207][backend][INFO] - + Checking contineous device isolation -[2023-09-12 09:05:11,207][pytorch][INFO] - + Disabling gradients -[2023-09-12 09:05:11,207][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-12 09:05:11,324][pytorch][INFO] - + Turning on eval mode -[2023-09-12 09:05:11,324][inference][INFO] - Running inference benchmark -[2023-09-12 09:05:11,442][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-12 09:05:11,443][inference][INFO] - + Tracking forward pass peak memory -[2023-09-12 09:05:11,485][inference][INFO] - + Forward pass peak memory: 473.75564799999995 (MB) -[2023-09-12 09:05:11,486][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-12 09:05:11,488][inference][INFO] - + Warming up the forward pass -[2023-09-12 09:05:11,534][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-12 09:05:16,579][inference][INFO] - + Forward pass latency: 3.49e-03 (s) -[2023-09-12 09:05:16,581][inference][INFO] - + Forward pass throughput: 1150.00 (samples/s) -[2023-09-12 09:05:16,581][inference][INFO] - Saving inference results -[2023-09-12 09:05:16,587][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-08_19:38:41_d53606031fdf59a6d25a18bd743d77846d0ea22a/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-08_19:38:41_d53606031fdf59a6d25a18bd743d77846d0ea22a/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 7e32a35f367f532a16d65c94e459c4f30c9188c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_19:38:41_d53606031fdf59a6d25a18bd743d77846d0ea22a/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference 
-model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-08_19:38:41_d53606031fdf59a6d25a18bd743d77846d0ea22a/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-08_19:38:41_d53606031fdf59a6d25a18bd743d77846d0ea22a/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 6a2ac9542e881a9b13fe2439fd8228993be45391..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_19:38:41_d53606031fdf59a6d25a18bd743d77846d0ea22a/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-08_19:38:41_d53606031fdf59a6d25a18bd743d77846d0ea22a/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-08_19:38:41_d53606031fdf59a6d25a18bd743d77846d0ea22a/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-09-08_19:38:41_d53606031fdf59a6d25a18bd743d77846d0ea22a/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_19:38:41_d53606031fdf59a6d25a18bd743d77846d0ea22a/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-08_19:38:41_d53606031fdf59a6d25a18bd743d77846d0ea22a/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-08_19:38:41_d53606031fdf59a6d25a18bd743d77846d0ea22a/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 5cf5f1ff54ffd6523449b409066f7697ce8772db..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_19:38:41_d53606031fdf59a6d25a18bd743d77846d0ea22a/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-08_19:38:41_d53606031fdf59a6d25a18bd743d77846d0ea22a/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-08_19:38:41_d53606031fdf59a6d25a18bd743d77846d0ea22a/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 16bf29d7c8e4de315d87883d373915f0cf44238b..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_19:38:41_d53606031fdf59a6d25a18bd743d77846d0ea22a/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,472.11315199999996,0.00321,312.0,0.479,209.0 diff --git a/raw_results/2023-09-08_19:38:41_d53606031fdf59a6d25a18bd743d77846d0ea22a/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-08_19:38:41_d53606031fdf59a6d25a18bd743d77846d0ea22a/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index fcf43706e9ac9db1ec6773ea8f15a300bd24c33c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_19:38:41_d53606031fdf59a6d25a18bd743d77846d0ea22a/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-12 09:05:21,450][benchmark][INFO] - Configuring inference benchmark -[2023-09-12 09:05:21,451][benchmark][INFO] - + Setting seed(42) -[2023-09-12 09:05:22,944][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-12 09:05:22,945][backend][INFO] - Configuring pytorch backend -[2023-09-12 09:05:22,945][backend][INFO] - + Checking initial device isolation -[2023-09-12 09:05:22,945][backend][INFO] - + Checking contineous device isolation -[2023-09-12 09:05:22,945][pytorch][INFO] - + Disabling gradients -[2023-09-12 09:05:22,945][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-12 09:05:23,573][pytorch][INFO] - + Turning on eval mode -[2023-09-12 09:05:23,573][inference][INFO] - Running inference benchmark -[2023-09-12 09:05:23,764][inference][INFO] - + Tracking forward pass peak memory -[2023-09-12 09:05:23,961][inference][INFO] - + Forward pass peak memory: 472.11315199999996 (MB) -[2023-09-12 09:05:23,963][inference][INFO] - + Warming up the 
forward pass -[2023-09-12 09:05:24,000][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-12 09:05:29,049][inference][INFO] - + Forward pass latency: 3.21e-03 (s) -[2023-09-12 09:05:29,051][inference][INFO] - + Forward pass throughput: 312.00 (samples/s) -[2023-09-12 09:05:29,051][inference][INFO] - + Warming up the generation pass -[2023-09-12 09:05:29,539][inference][INFO] - + Tracking generation latency and throughput -[2023-09-12 09:05:34,807][inference][INFO] - + Generation pass latency: 4.79e-01 (s) -[2023-09-12 09:05:34,808][inference][INFO] - + Generation pass throughput: 209.00 (tokens/s) -[2023-09-12 09:05:34,808][inference][INFO] - Saving inference results -[2023-09-12 09:05:34,829][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-08_20:03:09_09b2de6eb74b1e5ff4f4c3d9839485f4165627c9/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-08_20:03:09_09b2de6eb74b1e5ff4f4c3d9839485f4165627c9/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index d75847bed7772db4647c7b58e851f1868b48e619..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_20:03:09_09b2de6eb74b1e5ff4f4c3d9839485f4165627c9/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-08_20:03:09_09b2de6eb74b1e5ff4f4c3d9839485f4165627c9/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-08_20:03:09_09b2de6eb74b1e5ff4f4c3d9839485f4165627c9/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 7c9a2cb534d7d82622d7993904f91eeda6d940dd..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_20:03:09_09b2de6eb74b1e5ff4f4c3d9839485f4165627c9/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-08_20:03:09_09b2de6eb74b1e5ff4f4c3d9839485f4165627c9/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-09-08_20:03:09_09b2de6eb74b1e5ff4f4c3d9839485f4165627c9/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-08_20:03:09_09b2de6eb74b1e5ff4f4c3d9839485f4165627c9/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_20:03:09_09b2de6eb74b1e5ff4f4c3d9839485f4165627c9/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-08_20:03:09_09b2de6eb74b1e5ff4f4c3d9839485f4165627c9/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-08_20:03:09_09b2de6eb74b1e5ff4f4c3d9839485f4165627c9/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 36a53864c4e26f9e38905bad2498ae02dc1b767f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_20:03:09_09b2de6eb74b1e5ff4f4c3d9839485f4165627c9/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-08_20:03:09_09b2de6eb74b1e5ff4f4c3d9839485f4165627c9/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-08_20:03:09_09b2de6eb74b1e5ff4f4c3d9839485f4165627c9/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 21cbe03a52d417509526f13f4f4ec469420d5a71..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_20:03:09_09b2de6eb74b1e5ff4f4c3d9839485f4165627c9/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,472.272896,0.00369,271.0 diff --git a/raw_results/2023-09-08_20:03:09_09b2de6eb74b1e5ff4f4c3d9839485f4165627c9/pytorch_bert_inference/0/main.log b/raw_results/2023-09-08_20:03:09_09b2de6eb74b1e5ff4f4c3d9839485f4165627c9/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
59fc81ac10b23663aeb93706977dee63b6df68b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_20:03:09_09b2de6eb74b1e5ff4f4c3d9839485f4165627c9/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-12 09:06:53,165][benchmark][INFO] - Configuring inference benchmark -[2023-09-12 09:06:53,166][benchmark][INFO] - + Setting seed(42) -[2023-09-12 09:06:54,564][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-12 09:06:54,564][backend][INFO] - Configuring pytorch backend -[2023-09-12 09:06:54,565][backend][INFO] - + Checking initial device isolation -[2023-09-12 09:06:54,565][backend][INFO] - + Checking contineous device isolation -[2023-09-12 09:06:54,565][pytorch][INFO] - + Disabling gradients -[2023-09-12 09:06:54,565][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-12 09:06:55,240][pytorch][INFO] - + Turning on eval mode -[2023-09-12 09:06:55,241][inference][INFO] - Running inference benchmark -[2023-09-12 09:06:55,359][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-12 09:06:55,360][inference][INFO] - + Tracking forward pass peak memory -[2023-09-12 09:06:55,570][inference][INFO] - + Forward pass peak memory: 472.272896 (MB) -[2023-09-12 09:06:55,571][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-12 09:06:55,573][inference][INFO] - + Warming up the forward pass -[2023-09-12 09:06:55,605][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-12 09:07:00,655][inference][INFO] - + Forward pass latency: 3.69e-03 (s) -[2023-09-12 09:07:00,657][inference][INFO] - + Forward pass throughput: 271.00 (samples/s) -[2023-09-12 09:07:00,657][inference][INFO] - Saving inference results -[2023-09-12 09:07:00,668][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-08_20:03:09_09b2de6eb74b1e5ff4f4c3d9839485f4165627c9/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-08_20:03:09_09b2de6eb74b1e5ff4f4c3d9839485f4165627c9/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 344ec2f935a31a19a62e706f12f4bb8e09c4c386..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_20:03:09_09b2de6eb74b1e5ff4f4c3d9839485f4165627c9/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-08_20:03:09_09b2de6eb74b1e5ff4f4c3d9839485f4165627c9/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-08_20:03:09_09b2de6eb74b1e5ff4f4c3d9839485f4165627c9/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 7fee8e35a4766307a90f10eb2fa2fef9f43fd340..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_20:03:09_09b2de6eb74b1e5ff4f4c3d9839485f4165627c9/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-08_20:03:09_09b2de6eb74b1e5ff4f4c3d9839485f4165627c9/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-08_20:03:09_09b2de6eb74b1e5ff4f4c3d9839485f4165627c9/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-09-08_20:03:09_09b2de6eb74b1e5ff4f4c3d9839485f4165627c9/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_20:03:09_09b2de6eb74b1e5ff4f4c3d9839485f4165627c9/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-08_20:03:09_09b2de6eb74b1e5ff4f4c3d9839485f4165627c9/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-08_20:03:09_09b2de6eb74b1e5ff4f4c3d9839485f4165627c9/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 9af0e66383a2862a3d036ea4917e3bacabb881ae..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_20:03:09_09b2de6eb74b1e5ff4f4c3d9839485f4165627c9/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-08_20:03:09_09b2de6eb74b1e5ff4f4c3d9839485f4165627c9/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-08_20:03:09_09b2de6eb74b1e5ff4f4c3d9839485f4165627c9/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index a47c9d0d8c5889795f98106a53126d1325a3e1f9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_20:03:09_09b2de6eb74b1e5ff4f4c3d9839485f4165627c9/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,473.64915199999996,0.00431,928.0 diff --git a/raw_results/2023-09-08_20:03:09_09b2de6eb74b1e5ff4f4c3d9839485f4165627c9/pytorch_bert_inference/1/main.log b/raw_results/2023-09-08_20:03:09_09b2de6eb74b1e5ff4f4c3d9839485f4165627c9/pytorch_bert_inference/1/main.log deleted file mode 100644 index 0abadc5ac97df55ca98ea9c2008e522444a9263a..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_20:03:09_09b2de6eb74b1e5ff4f4c3d9839485f4165627c9/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-12 09:07:01,054][benchmark][INFO] - Configuring inference benchmark -[2023-09-12 09:07:01,055][benchmark][INFO] - + Setting seed(42) -[2023-09-12 09:07:01,493][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-12 09:07:01,494][backend][INFO] - Configuring pytorch backend -[2023-09-12 09:07:01,494][backend][INFO] - + Checking initial device isolation -[2023-09-12 09:07:01,494][backend][INFO] - + Checking contineous device isolation -[2023-09-12 09:07:01,494][pytorch][INFO] - + Disabling gradients -[2023-09-12 09:07:01,494][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-12 09:07:01,612][pytorch][INFO] - + Turning on eval mode -[2023-09-12 09:07:01,612][inference][INFO] - Running inference benchmark -[2023-09-12 09:07:01,743][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-12 09:07:01,745][inference][INFO] - + Tracking forward 
pass peak memory -[2023-09-12 09:07:01,789][inference][INFO] - + Forward pass peak memory: 473.64915199999996 (MB) -[2023-09-12 09:07:01,790][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-12 09:07:01,792][inference][INFO] - + Warming up the forward pass -[2023-09-12 09:07:01,842][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-12 09:07:06,883][inference][INFO] - + Forward pass latency: 4.31e-03 (s) -[2023-09-12 09:07:06,884][inference][INFO] - + Forward pass throughput: 928.00 (samples/s) -[2023-09-12 09:07:06,885][inference][INFO] - Saving inference results -[2023-09-12 09:07:06,893][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-08_20:03:09_09b2de6eb74b1e5ff4f4c3d9839485f4165627c9/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-08_20:03:09_09b2de6eb74b1e5ff4f4c3d9839485f4165627c9/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 7e32a35f367f532a16d65c94e459c4f30c9188c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_20:03:09_09b2de6eb74b1e5ff4f4c3d9839485f4165627c9/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-08_20:03:09_09b2de6eb74b1e5ff4f4c3d9839485f4165627c9/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-08_20:03:09_09b2de6eb74b1e5ff4f4c3d9839485f4165627c9/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index bfc14b0f6a51e3513c82cfdfb03476262f72a455..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_20:03:09_09b2de6eb74b1e5ff4f4c3d9839485f4165627c9/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - 
launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-08_20:03:09_09b2de6eb74b1e5ff4f4c3d9839485f4165627c9/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-08_20:03:09_09b2de6eb74b1e5ff4f4c3d9839485f4165627c9/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-09-08_20:03:09_09b2de6eb74b1e5ff4f4c3d9839485f4165627c9/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_20:03:09_09b2de6eb74b1e5ff4f4c3d9839485f4165627c9/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-08_20:03:09_09b2de6eb74b1e5ff4f4c3d9839485f4165627c9/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-08_20:03:09_09b2de6eb74b1e5ff4f4c3d9839485f4165627c9/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 5cf5f1ff54ffd6523449b409066f7697ce8772db..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_20:03:09_09b2de6eb74b1e5ff4f4c3d9839485f4165627c9/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-08_20:03:09_09b2de6eb74b1e5ff4f4c3d9839485f4165627c9/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-08_20:03:09_09b2de6eb74b1e5ff4f4c3d9839485f4165627c9/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 425ea04eaa3292af913757713048105c653faa7e..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-08_20:03:09_09b2de6eb74b1e5ff4f4c3d9839485f4165627c9/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,471.8592,0.00321,312.0,0.496,202.0 diff --git a/raw_results/2023-09-08_20:03:09_09b2de6eb74b1e5ff4f4c3d9839485f4165627c9/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-08_20:03:09_09b2de6eb74b1e5ff4f4c3d9839485f4165627c9/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index b0041c857ca994e1e0551c6bcbbf0148c5d2b579..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-09-08_20:03:09_09b2de6eb74b1e5ff4f4c3d9839485f4165627c9/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-12 09:07:11,706][benchmark][INFO] - Configuring inference benchmark -[2023-09-12 09:07:11,707][benchmark][INFO] - + Setting seed(42) -[2023-09-12 09:07:13,373][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-12 09:07:13,373][backend][INFO] - Configuring pytorch backend -[2023-09-12 09:07:13,373][backend][INFO] - + Checking initial device isolation -[2023-09-12 09:07:13,374][backend][INFO] - + Checking contineous device isolation -[2023-09-12 09:07:13,374][pytorch][INFO] - + Disabling gradients -[2023-09-12 09:07:13,374][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-12 09:07:13,998][pytorch][INFO] - + Turning on eval mode -[2023-09-12 09:07:13,999][inference][INFO] - Running inference benchmark -[2023-09-12 09:07:14,190][inference][INFO] - + Tracking forward pass peak memory -[2023-09-12 09:07:14,400][inference][INFO] - + Forward pass peak memory: 471.8592 (MB) -[2023-09-12 09:07:14,403][inference][INFO] - + Warming up the forward pass -[2023-09-12 09:07:14,441][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-12 09:07:19,491][inference][INFO] - + Forward pass latency: 3.21e-03 (s) -[2023-09-12 09:07:19,493][inference][INFO] - + Forward pass throughput: 312.00 (samples/s) -[2023-09-12 09:07:19,494][inference][INFO] - + Warming up the generation pass -[2023-09-12 09:07:20,001][inference][INFO] - + Tracking generation latency and throughput -[2023-09-12 09:07:25,454][inference][INFO] - + Generation pass latency: 4.96e-01 (s) -[2023-09-12 09:07:25,455][inference][INFO] - + Generation pass throughput: 202.00 (tokens/s) -[2023-09-12 09:07:25,455][inference][INFO] - Saving inference results -[2023-09-12 09:07:25,480][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-09_03:43:26_95b374952dc27d8511541d6f5a4e22c9ec11fb24/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-09_03:43:26_95b374952dc27d8511541d6f5a4e22c9ec11fb24/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index d75847bed7772db4647c7b58e851f1868b48e619..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-09_03:43:26_95b374952dc27d8511541d6f5a4e22c9ec11fb24/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 
16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-09_03:43:26_95b374952dc27d8511541d6f5a4e22c9ec11fb24/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-09_03:43:26_95b374952dc27d8511541d6f5a4e22c9ec11fb24/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index ec85194141d3700a9257aa1bfeea51600edec373..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-09_03:43:26_95b374952dc27d8511541d6f5a4e22c9ec11fb24/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-09_03:43:26_95b374952dc27d8511541d6f5a4e22c9ec11fb24/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-09_03:43:26_95b374952dc27d8511541d6f5a4e22c9ec11fb24/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-09_03:43:26_95b374952dc27d8511541d6f5a4e22c9ec11fb24/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-09_03:43:26_95b374952dc27d8511541d6f5a4e22c9ec11fb24/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-09_03:43:26_95b374952dc27d8511541d6f5a4e22c9ec11fb24/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-09_03:43:26_95b374952dc27d8511541d6f5a4e22c9ec11fb24/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 36a53864c4e26f9e38905bad2498ae02dc1b767f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-09_03:43:26_95b374952dc27d8511541d6f5a4e22c9ec11fb24/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-09_03:43:26_95b374952dc27d8511541d6f5a4e22c9ec11fb24/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-09_03:43:26_95b374952dc27d8511541d6f5a4e22c9ec11fb24/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 2f0898b993c98e670b42e4dbaf1c41d6d23d3311..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-09_03:43:26_95b374952dc27d8511541d6f5a4e22c9ec11fb24/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,471.03180799999996,0.00362,276.0 diff --git a/raw_results/2023-09-09_03:43:26_95b374952dc27d8511541d6f5a4e22c9ec11fb24/pytorch_bert_inference/0/main.log b/raw_results/2023-09-09_03:43:26_95b374952dc27d8511541d6f5a4e22c9ec11fb24/pytorch_bert_inference/0/main.log deleted file mode 100644 index 09e88fa7e806335d82ea73583f761fe2e5947b5f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-09_03:43:26_95b374952dc27d8511541d6f5a4e22c9ec11fb24/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-12 09:08:44,080][benchmark][INFO] - Configuring inference benchmark -[2023-09-12 09:08:44,081][benchmark][INFO] - + Setting seed(42) -[2023-09-12 09:08:46,006][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-12 09:08:46,006][backend][INFO] - Configuring pytorch backend -[2023-09-12 09:08:46,007][backend][INFO] - + Checking initial device isolation -[2023-09-12 09:08:46,007][backend][INFO] - + Checking contineous device isolation -[2023-09-12 09:08:46,007][pytorch][INFO] - + Disabling gradients -[2023-09-12 09:08:46,007][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-12 09:08:46,653][pytorch][INFO] - + Turning on eval mode -[2023-09-12 09:08:46,654][inference][INFO] - Running inference benchmark -[2023-09-12 09:08:46,775][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-12 09:08:46,777][inference][INFO] - + Tracking forward 
pass peak memory -[2023-09-12 09:08:46,993][inference][INFO] - + Forward pass peak memory: 471.03180799999996 (MB) -[2023-09-12 09:08:46,993][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-12 09:08:46,995][inference][INFO] - + Warming up the forward pass -[2023-09-12 09:08:47,034][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-12 09:08:52,079][inference][INFO] - + Forward pass latency: 3.62e-03 (s) -[2023-09-12 09:08:52,081][inference][INFO] - + Forward pass throughput: 276.00 (samples/s) -[2023-09-12 09:08:52,081][inference][INFO] - Saving inference results -[2023-09-12 09:08:52,092][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-09_03:43:26_95b374952dc27d8511541d6f5a4e22c9ec11fb24/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-09_03:43:26_95b374952dc27d8511541d6f5a4e22c9ec11fb24/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 344ec2f935a31a19a62e706f12f4bb8e09c4c386..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-09_03:43:26_95b374952dc27d8511541d6f5a4e22c9ec11fb24/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-09_03:43:26_95b374952dc27d8511541d6f5a4e22c9ec11fb24/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-09_03:43:26_95b374952dc27d8511541d6f5a4e22c9ec11fb24/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index d2f56e3f94ae71263fbdd171156c67f00e21985e..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-09_03:43:26_95b374952dc27d8511541d6f5a4e22c9ec11fb24/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} 
- launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-09_03:43:26_95b374952dc27d8511541d6f5a4e22c9ec11fb24/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-09-09_03:43:26_95b374952dc27d8511541d6f5a4e22c9ec11fb24/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-09-09_03:43:26_95b374952dc27d8511541d6f5a4e22c9ec11fb24/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-09_03:43:26_95b374952dc27d8511541d6f5a4e22c9ec11fb24/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-09_03:43:26_95b374952dc27d8511541d6f5a4e22c9ec11fb24/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-09_03:43:26_95b374952dc27d8511541d6f5a4e22c9ec11fb24/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 9af0e66383a2862a3d036ea4917e3bacabb881ae..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-09_03:43:26_95b374952dc27d8511541d6f5a4e22c9ec11fb24/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-09_03:43:26_95b374952dc27d8511541d6f5a4e22c9ec11fb24/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-09_03:43:26_95b374952dc27d8511541d6f5a4e22c9ec11fb24/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 75bc0f1fafed4ab1a4ed03ed5141267e0d789e95..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-09_03:43:26_95b374952dc27d8511541d6f5a4e22c9ec11fb24/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,473.24364799999995,0.00371,1080.0 diff --git a/raw_results/2023-09-09_03:43:26_95b374952dc27d8511541d6f5a4e22c9ec11fb24/pytorch_bert_inference/1/main.log b/raw_results/2023-09-09_03:43:26_95b374952dc27d8511541d6f5a4e22c9ec11fb24/pytorch_bert_inference/1/main.log deleted file mode 100644 index 
c776b46d7ec1aaef962340de4a4ef0ec25bb4315..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-09_03:43:26_95b374952dc27d8511541d6f5a4e22c9ec11fb24/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-12 09:08:52,480][benchmark][INFO] - Configuring inference benchmark -[2023-09-12 09:08:52,481][benchmark][INFO] - + Setting seed(42) -[2023-09-12 09:08:52,897][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-12 09:08:52,897][backend][INFO] - Configuring pytorch backend -[2023-09-12 09:08:52,897][backend][INFO] - + Checking initial device isolation -[2023-09-12 09:08:52,898][backend][INFO] - + Checking contineous device isolation -[2023-09-12 09:08:52,898][pytorch][INFO] - + Disabling gradients -[2023-09-12 09:08:52,898][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-12 09:08:53,012][pytorch][INFO] - + Turning on eval mode -[2023-09-12 09:08:53,013][inference][INFO] - Running inference benchmark -[2023-09-12 09:08:53,128][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-12 09:08:53,130][inference][INFO] - + Tracking forward pass peak memory -[2023-09-12 09:08:53,172][inference][INFO] - + Forward pass peak memory: 473.24364799999995 (MB) -[2023-09-12 09:08:53,173][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-12 09:08:53,175][inference][INFO] - + Warming up the forward pass -[2023-09-12 09:08:53,226][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-12 09:08:58,268][inference][INFO] - + Forward pass latency: 3.71e-03 (s) -[2023-09-12 09:08:58,269][inference][INFO] - + Forward pass throughput: 1080.00 (samples/s) -[2023-09-12 09:08:58,270][inference][INFO] - Saving inference results -[2023-09-12 09:08:58,277][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-09_03:43:26_95b374952dc27d8511541d6f5a4e22c9ec11fb24/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-09_03:43:26_95b374952dc27d8511541d6f5a4e22c9ec11fb24/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 7e32a35f367f532a16d65c94e459c4f30c9188c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-09_03:43:26_95b374952dc27d8511541d6f5a4e22c9ec11fb24/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-09_03:43:26_95b374952dc27d8511541d6f5a4e22c9ec11fb24/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-09_03:43:26_95b374952dc27d8511541d6f5a4e22c9ec11fb24/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 85aae4e317f192336f517589ab1445e12a22b228..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-09_03:43:26_95b374952dc27d8511541d6f5a4e22c9ec11fb24/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-09_03:43:26_95b374952dc27d8511541d6f5a4e22c9ec11fb24/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-09_03:43:26_95b374952dc27d8511541d6f5a4e22c9ec11fb24/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-09-09_03:43:26_95b374952dc27d8511541d6f5a4e22c9ec11fb24/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-09_03:43:26_95b374952dc27d8511541d6f5a4e22c9ec11fb24/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-09_03:43:26_95b374952dc27d8511541d6f5a4e22c9ec11fb24/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-09_03:43:26_95b374952dc27d8511541d6f5a4e22c9ec11fb24/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 5cf5f1ff54ffd6523449b409066f7697ce8772db..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-09_03:43:26_95b374952dc27d8511541d6f5a4e22c9ec11fb24/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-09_03:43:26_95b374952dc27d8511541d6f5a4e22c9ec11fb24/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-09_03:43:26_95b374952dc27d8511541d6f5a4e22c9ec11fb24/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 66e315934eebe9509b2f22b23f452bf34a572a14..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-09_03:43:26_95b374952dc27d8511541d6f5a4e22c9ec11fb24/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,472.129536,0.00395,253.0,0.48,208.0 diff --git a/raw_results/2023-09-09_03:43:26_95b374952dc27d8511541d6f5a4e22c9ec11fb24/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-09_03:43:26_95b374952dc27d8511541d6f5a4e22c9ec11fb24/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 694eecad47a173c414a1c360c36175520c0a19cf..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-09_03:43:26_95b374952dc27d8511541d6f5a4e22c9ec11fb24/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-12 09:09:03,037][benchmark][INFO] - Configuring inference benchmark -[2023-09-12 09:09:03,038][benchmark][INFO] - + Setting seed(42) -[2023-09-12 09:09:04,820][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-12 09:09:04,821][backend][INFO] - Configuring pytorch backend -[2023-09-12 09:09:04,821][backend][INFO] - + Checking initial device isolation -[2023-09-12 09:09:04,821][backend][INFO] - + Checking contineous device isolation -[2023-09-12 09:09:04,821][pytorch][INFO] - + Disabling gradients -[2023-09-12 09:09:04,821][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-12 09:09:05,823][pytorch][INFO] - + Turning on eval mode -[2023-09-12 09:09:05,824][inference][INFO] - Running inference benchmark -[2023-09-12 09:09:06,011][inference][INFO] - + Tracking forward pass peak memory -[2023-09-12 09:09:06,212][inference][INFO] - + Forward pass peak memory: 472.129536 (MB) -[2023-09-12 09:09:06,214][inference][INFO] - + Warming up the forward pass 
-[2023-09-12 09:09:06,250][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-12 09:09:11,296][inference][INFO] - + Forward pass latency: 3.95e-03 (s) -[2023-09-12 09:09:11,298][inference][INFO] - + Forward pass throughput: 253.00 (samples/s) -[2023-09-12 09:09:11,298][inference][INFO] - + Warming up the generation pass -[2023-09-12 09:09:11,809][inference][INFO] - + Tracking generation latency and throughput -[2023-09-12 09:09:17,090][inference][INFO] - + Generation pass latency: 4.80e-01 (s) -[2023-09-12 09:09:17,090][inference][INFO] - + Generation pass throughput: 208.00 (tokens/s) -[2023-09-12 09:09:17,091][inference][INFO] - Saving inference results -[2023-09-12 09:09:17,113][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-11_11:56:36_7fd2d68613cf846aef9d3426873ba5a91a673bcf/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-11_11:56:36_7fd2d68613cf846aef9d3426873ba5a91a673bcf/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index d75847bed7772db4647c7b58e851f1868b48e619..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-11_11:56:36_7fd2d68613cf846aef9d3426873ba5a91a673bcf/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-11_11:56:36_7fd2d68613cf846aef9d3426873ba5a91a673bcf/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-11_11:56:36_7fd2d68613cf846aef9d3426873ba5a91a673bcf/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index ce08d45855ed920d5af8df78134317bb499c6c32..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-11_11:56:36_7fd2d68613cf846aef9d3426873ba5a91a673bcf/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-11_11:56:36_7fd2d68613cf846aef9d3426873ba5a91a673bcf/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-09-11_11:56:36_7fd2d68613cf846aef9d3426873ba5a91a673bcf/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-11_11:56:36_7fd2d68613cf846aef9d3426873ba5a91a673bcf/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-11_11:56:36_7fd2d68613cf846aef9d3426873ba5a91a673bcf/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-11_11:56:36_7fd2d68613cf846aef9d3426873ba5a91a673bcf/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-11_11:56:36_7fd2d68613cf846aef9d3426873ba5a91a673bcf/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 36a53864c4e26f9e38905bad2498ae02dc1b767f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-11_11:56:36_7fd2d68613cf846aef9d3426873ba5a91a673bcf/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-11_11:56:36_7fd2d68613cf846aef9d3426873ba5a91a673bcf/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-11_11:56:36_7fd2d68613cf846aef9d3426873ba5a91a673bcf/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index ef26c8ba6cca179cbb40c5ae7a00c5c3184ec3d1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-11_11:56:36_7fd2d68613cf846aef9d3426873ba5a91a673bcf/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,471.92064,0.00384,260.0 diff --git a/raw_results/2023-09-11_11:56:36_7fd2d68613cf846aef9d3426873ba5a91a673bcf/pytorch_bert_inference/0/main.log b/raw_results/2023-09-11_11:56:36_7fd2d68613cf846aef9d3426873ba5a91a673bcf/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
84752783f52c5d3a3c06873e382fdee54ea6da1e..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-11_11:56:36_7fd2d68613cf846aef9d3426873ba5a91a673bcf/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-12 09:10:35,289][benchmark][INFO] - Configuring inference benchmark -[2023-09-12 09:10:35,290][benchmark][INFO] - + Setting seed(42) -[2023-09-12 09:10:36,577][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-12 09:10:36,577][backend][INFO] - Configuring pytorch backend -[2023-09-12 09:10:36,578][backend][INFO] - + Checking initial device isolation -[2023-09-12 09:10:36,578][backend][INFO] - + Checking contineous device isolation -[2023-09-12 09:10:36,578][pytorch][INFO] - + Disabling gradients -[2023-09-12 09:10:36,578][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-12 09:10:37,280][pytorch][INFO] - + Turning on eval mode -[2023-09-12 09:10:37,281][inference][INFO] - Running inference benchmark -[2023-09-12 09:10:37,404][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-12 09:10:37,405][inference][INFO] - + Tracking forward pass peak memory -[2023-09-12 09:10:37,617][inference][INFO] - + Forward pass peak memory: 471.92064 (MB) -[2023-09-12 09:10:37,618][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-12 09:10:37,620][inference][INFO] - + Warming up the forward pass -[2023-09-12 09:10:37,657][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-12 09:10:42,700][inference][INFO] - + Forward pass latency: 3.84e-03 (s) -[2023-09-12 09:10:42,702][inference][INFO] - + Forward pass throughput: 260.00 (samples/s) -[2023-09-12 09:10:42,702][inference][INFO] - Saving inference results -[2023-09-12 09:10:42,714][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-11_11:56:36_7fd2d68613cf846aef9d3426873ba5a91a673bcf/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-11_11:56:36_7fd2d68613cf846aef9d3426873ba5a91a673bcf/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 344ec2f935a31a19a62e706f12f4bb8e09c4c386..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-11_11:56:36_7fd2d68613cf846aef9d3426873ba5a91a673bcf/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-11_11:56:36_7fd2d68613cf846aef9d3426873ba5a91a673bcf/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-11_11:56:36_7fd2d68613cf846aef9d3426873ba5a91a673bcf/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 063b1d0f3aca20885def200e5400859a64eb8983..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-11_11:56:36_7fd2d68613cf846aef9d3426873ba5a91a673bcf/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-11_11:56:36_7fd2d68613cf846aef9d3426873ba5a91a673bcf/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-11_11:56:36_7fd2d68613cf846aef9d3426873ba5a91a673bcf/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-09-11_11:56:36_7fd2d68613cf846aef9d3426873ba5a91a673bcf/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-11_11:56:36_7fd2d68613cf846aef9d3426873ba5a91a673bcf/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-11_11:56:36_7fd2d68613cf846aef9d3426873ba5a91a673bcf/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-11_11:56:36_7fd2d68613cf846aef9d3426873ba5a91a673bcf/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 9af0e66383a2862a3d036ea4917e3bacabb881ae..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-11_11:56:36_7fd2d68613cf846aef9d3426873ba5a91a673bcf/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-11_11:56:36_7fd2d68613cf846aef9d3426873ba5a91a673bcf/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-11_11:56:36_7fd2d68613cf846aef9d3426873ba5a91a673bcf/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 9e999f759f1c10784758f569fd9c2d9e9bd11f75..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-11_11:56:36_7fd2d68613cf846aef9d3426873ba5a91a673bcf/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,473.32147199999997,0.00421,950.0 diff --git a/raw_results/2023-09-11_11:56:36_7fd2d68613cf846aef9d3426873ba5a91a673bcf/pytorch_bert_inference/1/main.log b/raw_results/2023-09-11_11:56:36_7fd2d68613cf846aef9d3426873ba5a91a673bcf/pytorch_bert_inference/1/main.log deleted file mode 100644 index ceb255e1b94d5a691679cd359c1bf762390397c6..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-11_11:56:36_7fd2d68613cf846aef9d3426873ba5a91a673bcf/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-12 09:10:43,101][benchmark][INFO] - Configuring inference benchmark -[2023-09-12 09:10:43,103][benchmark][INFO] - + Setting seed(42) -[2023-09-12 09:10:43,547][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-12 09:10:43,548][backend][INFO] - Configuring pytorch backend -[2023-09-12 09:10:43,548][backend][INFO] - + Checking initial device isolation -[2023-09-12 09:10:43,548][backend][INFO] - + Checking contineous device isolation -[2023-09-12 09:10:43,548][pytorch][INFO] - + Disabling gradients -[2023-09-12 09:10:43,549][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-12 09:10:43,660][pytorch][INFO] - + Turning on eval mode -[2023-09-12 09:10:43,660][inference][INFO] - Running inference benchmark -[2023-09-12 09:10:43,784][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-12 09:10:43,785][inference][INFO] - + Tracking forward 
pass peak memory -[2023-09-12 09:10:43,830][inference][INFO] - + Forward pass peak memory: 473.32147199999997 (MB) -[2023-09-12 09:10:43,831][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-12 09:10:43,833][inference][INFO] - + Warming up the forward pass -[2023-09-12 09:10:43,876][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-12 09:10:48,916][inference][INFO] - + Forward pass latency: 4.21e-03 (s) -[2023-09-12 09:10:48,917][inference][INFO] - + Forward pass throughput: 950.00 (samples/s) -[2023-09-12 09:10:48,917][inference][INFO] - Saving inference results -[2023-09-12 09:10:48,924][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-11_11:56:36_7fd2d68613cf846aef9d3426873ba5a91a673bcf/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-11_11:56:36_7fd2d68613cf846aef9d3426873ba5a91a673bcf/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 7e32a35f367f532a16d65c94e459c4f30c9188c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-11_11:56:36_7fd2d68613cf846aef9d3426873ba5a91a673bcf/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-11_11:56:36_7fd2d68613cf846aef9d3426873ba5a91a673bcf/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-11_11:56:36_7fd2d68613cf846aef9d3426873ba5a91a673bcf/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 275084ea6f21fc481edabaf5f0befdb1ae3cf94a..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-11_11:56:36_7fd2d68613cf846aef9d3426873ba5a91a673bcf/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - 
launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-11_11:56:36_7fd2d68613cf846aef9d3426873ba5a91a673bcf/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-11_11:56:36_7fd2d68613cf846aef9d3426873ba5a91a673bcf/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-09-11_11:56:36_7fd2d68613cf846aef9d3426873ba5a91a673bcf/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-11_11:56:36_7fd2d68613cf846aef9d3426873ba5a91a673bcf/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-11_11:56:36_7fd2d68613cf846aef9d3426873ba5a91a673bcf/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-11_11:56:36_7fd2d68613cf846aef9d3426873ba5a91a673bcf/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 5cf5f1ff54ffd6523449b409066f7697ce8772db..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-11_11:56:36_7fd2d68613cf846aef9d3426873ba5a91a673bcf/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-11_11:56:36_7fd2d68613cf846aef9d3426873ba5a91a673bcf/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-11_11:56:36_7fd2d68613cf846aef9d3426873ba5a91a673bcf/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 99ebcf042177b8e6aea5afb73ebb13331507a5ce..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-11_11:56:36_7fd2d68613cf846aef9d3426873ba5a91a673bcf/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,472.076288,0.00315,317.0,0.486,206.0 diff --git a/raw_results/2023-09-11_11:56:36_7fd2d68613cf846aef9d3426873ba5a91a673bcf/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-11_11:56:36_7fd2d68613cf846aef9d3426873ba5a91a673bcf/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index d6c99dc1f62a0a07df968333bb14c382d4a5de01..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-09-11_11:56:36_7fd2d68613cf846aef9d3426873ba5a91a673bcf/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-12 09:10:53,681][benchmark][INFO] - Configuring inference benchmark -[2023-09-12 09:10:53,683][benchmark][INFO] - + Setting seed(42) -[2023-09-12 09:10:55,082][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-12 09:10:55,082][backend][INFO] - Configuring pytorch backend -[2023-09-12 09:10:55,082][backend][INFO] - + Checking initial device isolation -[2023-09-12 09:10:55,083][backend][INFO] - + Checking contineous device isolation -[2023-09-12 09:10:55,083][pytorch][INFO] - + Disabling gradients -[2023-09-12 09:10:55,083][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-12 09:10:55,757][pytorch][INFO] - + Turning on eval mode -[2023-09-12 09:10:55,758][inference][INFO] - Running inference benchmark -[2023-09-12 09:10:55,949][inference][INFO] - + Tracking forward pass peak memory -[2023-09-12 09:10:56,160][inference][INFO] - + Forward pass peak memory: 472.076288 (MB) -[2023-09-12 09:10:56,162][inference][INFO] - + Warming up the forward pass -[2023-09-12 09:10:56,200][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-12 09:11:01,249][inference][INFO] - + Forward pass latency: 3.15e-03 (s) -[2023-09-12 09:11:01,251][inference][INFO] - + Forward pass throughput: 317.00 (samples/s) -[2023-09-12 09:11:01,251][inference][INFO] - + Warming up the generation pass -[2023-09-12 09:11:01,743][inference][INFO] - + Tracking generation latency and throughput -[2023-09-12 09:11:07,089][inference][INFO] - + Generation pass latency: 4.86e-01 (s) -[2023-09-12 09:11:07,090][inference][INFO] - + Generation pass throughput: 206.00 (tokens/s) -[2023-09-12 09:11:07,090][inference][INFO] - Saving inference results -[2023-09-12 09:11:07,114][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-11_11:57:04_9cebae64ad33085eaa15ddf95ce6c900131881d5/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-11_11:57:04_9cebae64ad33085eaa15ddf95ce6c900131881d5/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index d75847bed7772db4647c7b58e851f1868b48e619..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-11_11:57:04_9cebae64ad33085eaa15ddf95ce6c900131881d5/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-11_11:57:04_9cebae64ad33085eaa15ddf95ce6c900131881d5/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-11_11:57:04_9cebae64ad33085eaa15ddf95ce6c900131881d5/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 0aa55cfb71bfd6e2b4d757ae2ee3d5ed76264afa..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-11_11:57:04_9cebae64ad33085eaa15ddf95ce6c900131881d5/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-11_11:57:04_9cebae64ad33085eaa15ddf95ce6c900131881d5/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-11_11:57:04_9cebae64ad33085eaa15ddf95ce6c900131881d5/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-11_11:57:04_9cebae64ad33085eaa15ddf95ce6c900131881d5/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-11_11:57:04_9cebae64ad33085eaa15ddf95ce6c900131881d5/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-11_11:57:04_9cebae64ad33085eaa15ddf95ce6c900131881d5/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-11_11:57:04_9cebae64ad33085eaa15ddf95ce6c900131881d5/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 36a53864c4e26f9e38905bad2498ae02dc1b767f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-11_11:57:04_9cebae64ad33085eaa15ddf95ce6c900131881d5/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-11_11:57:04_9cebae64ad33085eaa15ddf95ce6c900131881d5/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-11_11:57:04_9cebae64ad33085eaa15ddf95ce6c900131881d5/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 6222242628e1e7171dfbc031e1f013a581171f8a..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-11_11:57:04_9cebae64ad33085eaa15ddf95ce6c900131881d5/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,472.305664,0.0031,323.0 diff --git a/raw_results/2023-09-11_11:57:04_9cebae64ad33085eaa15ddf95ce6c900131881d5/pytorch_bert_inference/0/main.log b/raw_results/2023-09-11_11:57:04_9cebae64ad33085eaa15ddf95ce6c900131881d5/pytorch_bert_inference/0/main.log deleted file mode 100644 index 54814dcc5e9ee3dedf744a1980e866c319639679..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-11_11:57:04_9cebae64ad33085eaa15ddf95ce6c900131881d5/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-12 09:12:26,399][benchmark][INFO] - Configuring inference benchmark -[2023-09-12 09:12:26,401][benchmark][INFO] - + Setting seed(42) -[2023-09-12 09:12:27,720][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-12 09:12:27,720][backend][INFO] - Configuring pytorch backend -[2023-09-12 09:12:27,720][backend][INFO] - + Checking initial device isolation -[2023-09-12 09:12:27,720][backend][INFO] - + Checking contineous device isolation -[2023-09-12 09:12:27,721][pytorch][INFO] - + Disabling gradients -[2023-09-12 09:12:27,721][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-12 09:12:28,360][pytorch][INFO] - + Turning on eval mode -[2023-09-12 09:12:28,361][inference][INFO] - Running inference benchmark -[2023-09-12 09:12:28,484][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-12 09:12:28,485][inference][INFO] - + Tracking forward pass peak 
memory -[2023-09-12 09:12:28,693][inference][INFO] - + Forward pass peak memory: 472.305664 (MB) -[2023-09-12 09:12:28,694][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-12 09:12:28,696][inference][INFO] - + Warming up the forward pass -[2023-09-12 09:12:28,728][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-12 09:12:33,779][inference][INFO] - + Forward pass latency: 3.10e-03 (s) -[2023-09-12 09:12:33,780][inference][INFO] - + Forward pass throughput: 323.00 (samples/s) -[2023-09-12 09:12:33,780][inference][INFO] - Saving inference results -[2023-09-12 09:12:33,791][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-11_11:57:04_9cebae64ad33085eaa15ddf95ce6c900131881d5/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-11_11:57:04_9cebae64ad33085eaa15ddf95ce6c900131881d5/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 344ec2f935a31a19a62e706f12f4bb8e09c4c386..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-11_11:57:04_9cebae64ad33085eaa15ddf95ce6c900131881d5/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-11_11:57:04_9cebae64ad33085eaa15ddf95ce6c900131881d5/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-11_11:57:04_9cebae64ad33085eaa15ddf95ce6c900131881d5/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 47d4c7584260df03371c91bd6ccd431bedd92f88..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-11_11:57:04_9cebae64ad33085eaa15ddf95ce6c900131881d5/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-11_11:57:04_9cebae64ad33085eaa15ddf95ce6c900131881d5/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-11_11:57:04_9cebae64ad33085eaa15ddf95ce6c900131881d5/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-09-11_11:57:04_9cebae64ad33085eaa15ddf95ce6c900131881d5/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-11_11:57:04_9cebae64ad33085eaa15ddf95ce6c900131881d5/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-11_11:57:04_9cebae64ad33085eaa15ddf95ce6c900131881d5/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-11_11:57:04_9cebae64ad33085eaa15ddf95ce6c900131881d5/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 9af0e66383a2862a3d036ea4917e3bacabb881ae..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-11_11:57:04_9cebae64ad33085eaa15ddf95ce6c900131881d5/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-11_11:57:04_9cebae64ad33085eaa15ddf95ce6c900131881d5/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-11_11:57:04_9cebae64ad33085eaa15ddf95ce6c900131881d5/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index e1e1bd758785bd3e988a348e82917a930197db13..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-11_11:57:04_9cebae64ad33085eaa15ddf95ce6c900131881d5/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,473.77203199999997,0.00341,1170.0 diff --git a/raw_results/2023-09-11_11:57:04_9cebae64ad33085eaa15ddf95ce6c900131881d5/pytorch_bert_inference/1/main.log b/raw_results/2023-09-11_11:57:04_9cebae64ad33085eaa15ddf95ce6c900131881d5/pytorch_bert_inference/1/main.log deleted file mode 100644 index 4d7852eaed738fe481923d3f0a984c35cdae41c5..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-09-11_11:57:04_9cebae64ad33085eaa15ddf95ce6c900131881d5/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-12 09:12:34,173][benchmark][INFO] - Configuring inference benchmark -[2023-09-12 09:12:34,174][benchmark][INFO] - + Setting seed(42) -[2023-09-12 09:12:34,597][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-12 09:12:34,597][backend][INFO] - Configuring pytorch backend -[2023-09-12 09:12:34,597][backend][INFO] - + Checking initial device isolation -[2023-09-12 09:12:34,597][backend][INFO] - + Checking contineous device isolation -[2023-09-12 09:12:34,598][pytorch][INFO] - + Disabling gradients -[2023-09-12 09:12:34,598][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-12 09:12:34,715][pytorch][INFO] - + Turning on eval mode -[2023-09-12 09:12:34,715][inference][INFO] - Running inference benchmark -[2023-09-12 09:12:34,831][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-12 09:12:34,832][inference][INFO] - + Tracking forward pass peak memory -[2023-09-12 09:12:34,875][inference][INFO] - + Forward pass peak memory: 473.77203199999997 (MB) -[2023-09-12 09:12:34,876][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-12 09:12:34,878][inference][INFO] - + Warming up the forward pass -[2023-09-12 09:12:34,920][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-12 09:12:39,967][inference][INFO] - + Forward pass latency: 3.41e-03 (s) -[2023-09-12 09:12:39,969][inference][INFO] - + Forward pass throughput: 1170.00 (samples/s) -[2023-09-12 09:12:39,969][inference][INFO] - Saving inference results -[2023-09-12 09:12:39,977][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-11_11:57:04_9cebae64ad33085eaa15ddf95ce6c900131881d5/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-11_11:57:04_9cebae64ad33085eaa15ddf95ce6c900131881d5/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 7e32a35f367f532a16d65c94e459c4f30c9188c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-11_11:57:04_9cebae64ad33085eaa15ddf95ce6c900131881d5/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference 
-model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-11_11:57:04_9cebae64ad33085eaa15ddf95ce6c900131881d5/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-11_11:57:04_9cebae64ad33085eaa15ddf95ce6c900131881d5/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index ad8cda1fa35909b98125ad2beab43444eb64d097..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-11_11:57:04_9cebae64ad33085eaa15ddf95ce6c900131881d5/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-11_11:57:04_9cebae64ad33085eaa15ddf95ce6c900131881d5/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-11_11:57:04_9cebae64ad33085eaa15ddf95ce6c900131881d5/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-09-11_11:57:04_9cebae64ad33085eaa15ddf95ce6c900131881d5/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-11_11:57:04_9cebae64ad33085eaa15ddf95ce6c900131881d5/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-11_11:57:04_9cebae64ad33085eaa15ddf95ce6c900131881d5/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-11_11:57:04_9cebae64ad33085eaa15ddf95ce6c900131881d5/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 5cf5f1ff54ffd6523449b409066f7697ce8772db..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-11_11:57:04_9cebae64ad33085eaa15ddf95ce6c900131881d5/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-11_11:57:04_9cebae64ad33085eaa15ddf95ce6c900131881d5/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-11_11:57:04_9cebae64ad33085eaa15ddf95ce6c900131881d5/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 851d799d6e93242b1d6b1bdb137e9b796d26ccb6..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-11_11:57:04_9cebae64ad33085eaa15ddf95ce6c900131881d5/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,472.272896,0.00369,271.0,0.508,197.0 diff --git a/raw_results/2023-09-11_11:57:04_9cebae64ad33085eaa15ddf95ce6c900131881d5/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-11_11:57:04_9cebae64ad33085eaa15ddf95ce6c900131881d5/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 245f6672e00ce08553abbd80ae030bee78f3fabe..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-11_11:57:04_9cebae64ad33085eaa15ddf95ce6c900131881d5/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-12 09:12:44,857][benchmark][INFO] - Configuring inference benchmark -[2023-09-12 09:12:44,858][benchmark][INFO] - + Setting seed(42) -[2023-09-12 09:12:46,229][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-12 09:12:46,230][backend][INFO] - Configuring pytorch backend -[2023-09-12 09:12:46,230][backend][INFO] - + Checking initial device isolation -[2023-09-12 09:12:46,230][backend][INFO] - + Checking contineous device isolation -[2023-09-12 09:12:46,230][pytorch][INFO] - + Disabling gradients -[2023-09-12 09:12:46,231][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-12 09:12:46,870][pytorch][INFO] - + Turning on eval mode -[2023-09-12 09:12:46,871][inference][INFO] - Running inference benchmark -[2023-09-12 09:12:47,065][inference][INFO] - + Tracking forward pass peak memory -[2023-09-12 09:12:47,278][inference][INFO] - + Forward pass peak memory: 472.272896 (MB) -[2023-09-12 09:12:47,280][inference][INFO] - + Warming up the forward pass 
-[2023-09-12 09:12:47,313][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-12 09:12:52,358][inference][INFO] - + Forward pass latency: 3.69e-03 (s) -[2023-09-12 09:12:52,359][inference][INFO] - + Forward pass throughput: 271.00 (samples/s) -[2023-09-12 09:12:52,361][inference][INFO] - + Warming up the generation pass -[2023-09-12 09:12:52,853][inference][INFO] - + Tracking generation latency and throughput -[2023-09-12 09:12:57,939][inference][INFO] - + Generation pass latency: 5.08e-01 (s) -[2023-09-12 09:12:57,940][inference][INFO] - + Generation pass throughput: 197.00 (tokens/s) -[2023-09-12 09:12:57,940][inference][INFO] - Saving inference results -[2023-09-12 09:12:57,963][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-11_15:20:29_ce2e7ef3d96afaf592faf3337b7dd997c7ad4928/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-11_15:20:29_ce2e7ef3d96afaf592faf3337b7dd997c7ad4928/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index d75847bed7772db4647c7b58e851f1868b48e619..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-11_15:20:29_ce2e7ef3d96afaf592faf3337b7dd997c7ad4928/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-11_15:20:29_ce2e7ef3d96afaf592faf3337b7dd997c7ad4928/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-11_15:20:29_ce2e7ef3d96afaf592faf3337b7dd997c7ad4928/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 7ff2c2dc152cc7b282bd4d0c630dc43fbe232548..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-11_15:20:29_ce2e7ef3d96afaf592faf3337b7dd997c7ad4928/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-11_15:20:29_ce2e7ef3d96afaf592faf3337b7dd997c7ad4928/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-09-11_15:20:29_ce2e7ef3d96afaf592faf3337b7dd997c7ad4928/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-11_15:20:29_ce2e7ef3d96afaf592faf3337b7dd997c7ad4928/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-11_15:20:29_ce2e7ef3d96afaf592faf3337b7dd997c7ad4928/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-11_15:20:29_ce2e7ef3d96afaf592faf3337b7dd997c7ad4928/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-11_15:20:29_ce2e7ef3d96afaf592faf3337b7dd997c7ad4928/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 36a53864c4e26f9e38905bad2498ae02dc1b767f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-11_15:20:29_ce2e7ef3d96afaf592faf3337b7dd997c7ad4928/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-11_15:20:29_ce2e7ef3d96afaf592faf3337b7dd997c7ad4928/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-11_15:20:29_ce2e7ef3d96afaf592faf3337b7dd997c7ad4928/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index eb6366ea554cac65a736d5a206bad64ecd8c8db6..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-11_15:20:29_ce2e7ef3d96afaf592faf3337b7dd997c7ad4928/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,471.744512,0.00373,268.0 diff --git a/raw_results/2023-09-11_15:20:29_ce2e7ef3d96afaf592faf3337b7dd997c7ad4928/pytorch_bert_inference/0/main.log b/raw_results/2023-09-11_15:20:29_ce2e7ef3d96afaf592faf3337b7dd997c7ad4928/pytorch_bert_inference/0/main.log deleted file mode 100644 index 
25d2b2e35106c49c18d018d97977d99664fae45f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-11_15:20:29_ce2e7ef3d96afaf592faf3337b7dd997c7ad4928/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-12 09:14:17,277][benchmark][INFO] - Configuring inference benchmark -[2023-09-12 09:14:17,278][benchmark][INFO] - + Setting seed(42) -[2023-09-12 09:14:18,475][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-12 09:14:18,476][backend][INFO] - Configuring pytorch backend -[2023-09-12 09:14:18,476][backend][INFO] - + Checking initial device isolation -[2023-09-12 09:14:18,476][backend][INFO] - + Checking contineous device isolation -[2023-09-12 09:14:18,476][pytorch][INFO] - + Disabling gradients -[2023-09-12 09:14:18,477][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-12 09:14:19,093][pytorch][INFO] - + Turning on eval mode -[2023-09-12 09:14:19,094][inference][INFO] - Running inference benchmark -[2023-09-12 09:14:19,216][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-12 09:14:19,217][inference][INFO] - + Tracking forward pass peak memory -[2023-09-12 09:14:19,427][inference][INFO] - + Forward pass peak memory: 471.744512 (MB) -[2023-09-12 09:14:19,428][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-12 09:14:19,430][inference][INFO] - + Warming up the forward pass -[2023-09-12 09:14:19,463][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-12 09:14:24,507][inference][INFO] - + Forward pass latency: 3.73e-03 (s) -[2023-09-12 09:14:24,509][inference][INFO] - + Forward pass throughput: 268.00 (samples/s) -[2023-09-12 09:14:24,509][inference][INFO] - Saving inference results -[2023-09-12 09:14:24,519][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-11_15:20:29_ce2e7ef3d96afaf592faf3337b7dd997c7ad4928/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-11_15:20:29_ce2e7ef3d96afaf592faf3337b7dd997c7ad4928/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 344ec2f935a31a19a62e706f12f4bb8e09c4c386..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-11_15:20:29_ce2e7ef3d96afaf592faf3337b7dd997c7ad4928/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-11_15:20:29_ce2e7ef3d96afaf592faf3337b7dd997c7ad4928/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-11_15:20:29_ce2e7ef3d96afaf592faf3337b7dd997c7ad4928/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index c16daa8f2c8c4a2c10074ae9fcdc12aa0a4fa67f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-11_15:20:29_ce2e7ef3d96afaf592faf3337b7dd997c7ad4928/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
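
Note on the `${is_inference:${benchmark.name}}` entries above: `config.yaml` stores the unresolved OmegaConf interpolation, while the corresponding `hydra_config.yaml` stores the resolved values (`disable_grad: true`, `eval_mode: true`). A minimal sketch of how such a resolver behaves, assuming it is registered the way the resolver name in these configs suggests (the registration below is illustrative, not the library's actual code):

```python
from omegaconf import OmegaConf

# Hypothetical re-creation of the resolver referenced by the configs:
# `${is_inference:${benchmark.name}}` resolves to True for the inference
# benchmark and False otherwise.
OmegaConf.register_new_resolver("is_inference", lambda name: name == "inference")

cfg = OmegaConf.create(
    """
    benchmark:
      name: inference
    backend:
      disable_grad: ${is_inference:${benchmark.name}}
      eval_mode: ${is_inference:${benchmark.name}}
    """
)

print(cfg.backend.disable_grad)  # True, matching the resolved hydra_config.yaml
print(cfg.backend.eval_mode)     # True
```
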
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-11_15:20:29_ce2e7ef3d96afaf592faf3337b7dd997c7ad4928/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-11_15:20:29_ce2e7ef3d96afaf592faf3337b7dd997c7ad4928/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-09-11_15:20:29_ce2e7ef3d96afaf592faf3337b7dd997c7ad4928/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-11_15:20:29_ce2e7ef3d96afaf592faf3337b7dd997c7ad4928/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-11_15:20:29_ce2e7ef3d96afaf592faf3337b7dd997c7ad4928/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-11_15:20:29_ce2e7ef3d96afaf592faf3337b7dd997c7ad4928/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 9af0e66383a2862a3d036ea4917e3bacabb881ae..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-11_15:20:29_ce2e7ef3d96afaf592faf3337b7dd997c7ad4928/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-11_15:20:29_ce2e7ef3d96afaf592faf3337b7dd997c7ad4928/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-11_15:20:29_ce2e7ef3d96afaf592faf3337b7dd997c7ad4928/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index bdc1ab1698a63eaa2df426ffe2d00af676c47be8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-11_15:20:29_ce2e7ef3d96afaf592faf3337b7dd997c7ad4928/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,473.841664,0.0043,930.0 diff --git a/raw_results/2023-09-11_15:20:29_ce2e7ef3d96afaf592faf3337b7dd997c7ad4928/pytorch_bert_inference/1/main.log b/raw_results/2023-09-11_15:20:29_ce2e7ef3d96afaf592faf3337b7dd997c7ad4928/pytorch_bert_inference/1/main.log deleted file mode 100644 index f60fff5dfeaeccf071f00045a28e163b0b30b191..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-11_15:20:29_ce2e7ef3d96afaf592faf3337b7dd997c7ad4928/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-12 09:14:24,902][benchmark][INFO] - Configuring inference benchmark -[2023-09-12 09:14:24,903][benchmark][INFO] - + Setting seed(42) -[2023-09-12 09:14:25,377][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-12 09:14:25,377][backend][INFO] - Configuring pytorch backend -[2023-09-12 09:14:25,378][backend][INFO] - + Checking initial device isolation -[2023-09-12 09:14:25,378][backend][INFO] - + Checking contineous device isolation -[2023-09-12 09:14:25,378][pytorch][INFO] - + Disabling gradients -[2023-09-12 09:14:25,378][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-12 09:14:25,499][pytorch][INFO] - + Turning on eval mode -[2023-09-12 09:14:25,499][inference][INFO] - Running inference benchmark -[2023-09-12 09:14:25,624][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-12 09:14:25,625][inference][INFO] - + Tracking forward pass peak 
memory -[2023-09-12 09:14:25,671][inference][INFO] - + Forward pass peak memory: 473.841664 (MB) -[2023-09-12 09:14:25,672][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-12 09:14:25,674][inference][INFO] - + Warming up the forward pass -[2023-09-12 09:14:25,718][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-12 09:14:30,759][inference][INFO] - + Forward pass latency: 4.30e-03 (s) -[2023-09-12 09:14:30,760][inference][INFO] - + Forward pass throughput: 930.00 (samples/s) -[2023-09-12 09:14:30,760][inference][INFO] - Saving inference results -[2023-09-12 09:14:30,768][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-11_15:20:29_ce2e7ef3d96afaf592faf3337b7dd997c7ad4928/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-11_15:20:29_ce2e7ef3d96afaf592faf3337b7dd997c7ad4928/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 7e32a35f367f532a16d65c94e459c4f30c9188c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-11_15:20:29_ce2e7ef3d96afaf592faf3337b7dd997c7ad4928/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-11_15:20:29_ce2e7ef3d96afaf592faf3337b7dd997c7ad4928/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-11_15:20:29_ce2e7ef3d96afaf592faf3337b7dd997c7ad4928/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 59b3bf8c4ef6004a13587d438feefef2c91e213d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-11_15:20:29_ce2e7ef3d96afaf592faf3337b7dd997c7ad4928/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-11_15:20:29_ce2e7ef3d96afaf592faf3337b7dd997c7ad4928/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-11_15:20:29_ce2e7ef3d96afaf592faf3337b7dd997c7ad4928/pytorch_gpt2_inference/0/.config/overrides.yaml 
b/raw_results/2023-09-11_15:20:29_ce2e7ef3d96afaf592faf3337b7dd997c7ad4928/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-11_15:20:29_ce2e7ef3d96afaf592faf3337b7dd997c7ad4928/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-11_15:20:29_ce2e7ef3d96afaf592faf3337b7dd997c7ad4928/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-11_15:20:29_ce2e7ef3d96afaf592faf3337b7dd997c7ad4928/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 5cf5f1ff54ffd6523449b409066f7697ce8772db..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-11_15:20:29_ce2e7ef3d96afaf592faf3337b7dd997c7ad4928/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-11_15:20:29_ce2e7ef3d96afaf592faf3337b7dd997c7ad4928/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-11_15:20:29_ce2e7ef3d96afaf592faf3337b7dd997c7ad4928/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 5f5fae29a1be0ce6a85e479c4600b43c3d19e9ad..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-11_15:20:29_ce2e7ef3d96afaf592faf3337b7dd997c7ad4928/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,471.744512,0.00324,309.0,0.563,178.0 diff --git a/raw_results/2023-09-11_15:20:29_ce2e7ef3d96afaf592faf3337b7dd997c7ad4928/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-11_15:20:29_ce2e7ef3d96afaf592faf3337b7dd997c7ad4928/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 5b5fff2066801419967e5228d5e0bfa51534578a..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-09-11_15:20:29_ce2e7ef3d96afaf592faf3337b7dd997c7ad4928/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-12 09:14:35,545][benchmark][INFO] - Configuring inference benchmark -[2023-09-12 09:14:35,546][benchmark][INFO] - + Setting seed(42) -[2023-09-12 09:14:36,920][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-12 09:14:36,920][backend][INFO] - Configuring pytorch backend -[2023-09-12 09:14:36,920][backend][INFO] - + Checking initial device isolation -[2023-09-12 09:14:36,920][backend][INFO] - + Checking contineous device isolation -[2023-09-12 09:14:36,921][pytorch][INFO] - + Disabling gradients -[2023-09-12 09:14:36,921][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-12 09:14:37,591][pytorch][INFO] - + Turning on eval mode -[2023-09-12 09:14:37,592][inference][INFO] - Running inference benchmark -[2023-09-12 09:14:37,787][inference][INFO] - + Tracking forward pass peak memory -[2023-09-12 09:14:37,993][inference][INFO] - + Forward pass peak memory: 471.744512 (MB) -[2023-09-12 09:14:37,995][inference][INFO] - + Warming up the forward pass -[2023-09-12 09:14:38,028][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-12 09:14:43,078][inference][INFO] - + Forward pass latency: 3.24e-03 (s) -[2023-09-12 09:14:43,079][inference][INFO] - + Forward pass throughput: 309.00 (samples/s) -[2023-09-12 09:14:43,080][inference][INFO] - + Warming up the generation pass -[2023-09-12 09:14:43,576][inference][INFO] - + Tracking generation latency and throughput -[2023-09-12 09:14:48,641][inference][INFO] - + Generation pass latency: 5.63e-01 (s) -[2023-09-12 09:14:48,642][inference][INFO] - + Generation pass throughput: 178.00 (tokens/s) -[2023-09-12 09:14:48,642][inference][INFO] - Saving inference results -[2023-09-12 09:14:48,667][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-11_21:03:26_5af2c6269672cda01c24ad48fab13f14a3ffb746/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-11_21:03:26_5af2c6269672cda01c24ad48fab13f14a3ffb746/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index d75847bed7772db4647c7b58e851f1868b48e619..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-11_21:03:26_5af2c6269672cda01c24ad48fab13f14a3ffb746/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - 
audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-11_21:03:26_5af2c6269672cda01c24ad48fab13f14a3ffb746/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-11_21:03:26_5af2c6269672cda01c24ad48fab13f14a3ffb746/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index b1207611196e0d23d7119afd6da46f609ef33f97..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-11_21:03:26_5af2c6269672cda01c24ad48fab13f14a3ffb746/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
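
The CSV columns in these results relate in a simple way: forward throughput is `batch_size` divided by mean forward latency, and generation throughput is `batch_size * new_tokens` divided by generation latency. A quick sanity check against the figures recorded above (this is the apparent relationship in the data; the exact aggregation inside `InferenceBenchmark` is not shown in this diff):

```python
# Values copied from the inference_results.csv / main.log entries in this diff.
def forward_throughput(batch_size: int, latency_s: float) -> float:
    # samples/s = samples processed per forward pass / time per pass
    return batch_size / latency_s

def generate_throughput(batch_size: int, new_tokens: int, latency_s: float) -> float:
    # tokens/s = tokens produced per generation pass / time per pass
    return batch_size * new_tokens / latency_s

print(round(forward_throughput(1, 0.00373)))      # 268, matches the batch_size=1 bert run
print(round(forward_throughput(4, 0.0043)))       # 930, matches the batch_size=4 bert run
print(round(generate_throughput(1, 100, 0.563)))  # 178, matches the gpt2 generate run
```
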
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-11_21:03:26_5af2c6269672cda01c24ad48fab13f14a3ffb746/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-11_21:03:26_5af2c6269672cda01c24ad48fab13f14a3ffb746/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-11_21:03:26_5af2c6269672cda01c24ad48fab13f14a3ffb746/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-11_21:03:26_5af2c6269672cda01c24ad48fab13f14a3ffb746/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-11_21:03:26_5af2c6269672cda01c24ad48fab13f14a3ffb746/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-11_21:03:26_5af2c6269672cda01c24ad48fab13f14a3ffb746/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 36a53864c4e26f9e38905bad2498ae02dc1b767f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-11_21:03:26_5af2c6269672cda01c24ad48fab13f14a3ffb746/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-11_21:03:26_5af2c6269672cda01c24ad48fab13f14a3ffb746/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-11_21:03:26_5af2c6269672cda01c24ad48fab13f14a3ffb746/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 4e0bf5b49532c8da8d57fbf9303145b36af6b7e5..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-11_21:03:26_5af2c6269672cda01c24ad48fab13f14a3ffb746/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,471.392256,0.0031,323.0 diff --git a/raw_results/2023-09-11_21:03:26_5af2c6269672cda01c24ad48fab13f14a3ffb746/pytorch_bert_inference/0/main.log b/raw_results/2023-09-11_21:03:26_5af2c6269672cda01c24ad48fab13f14a3ffb746/pytorch_bert_inference/0/main.log deleted file mode 100644 index 05233c04d94ae8de363419954ab3190e7e7dc0d5..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-11_21:03:26_5af2c6269672cda01c24ad48fab13f14a3ffb746/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-12 09:16:07,875][benchmark][INFO] - Configuring inference benchmark -[2023-09-12 09:16:07,876][benchmark][INFO] - + Setting seed(42) -[2023-09-12 09:16:09,082][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-12 09:16:09,083][backend][INFO] - Configuring pytorch backend -[2023-09-12 09:16:09,083][backend][INFO] - + Checking initial device isolation -[2023-09-12 09:16:09,083][backend][INFO] - + Checking contineous device isolation -[2023-09-12 09:16:09,083][pytorch][INFO] - + Disabling gradients -[2023-09-12 09:16:09,083][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-12 09:16:09,709][pytorch][INFO] - + Turning on eval mode -[2023-09-12 09:16:09,710][inference][INFO] - Running inference benchmark -[2023-09-12 09:16:09,831][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-12 09:16:09,832][inference][INFO] - + Tracking forward pass peak 
memory -[2023-09-12 09:16:10,042][inference][INFO] - + Forward pass peak memory: 471.392256 (MB) -[2023-09-12 09:16:10,043][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-12 09:16:10,045][inference][INFO] - + Warming up the forward pass -[2023-09-12 09:16:10,077][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-12 09:16:15,126][inference][INFO] - + Forward pass latency: 3.10e-03 (s) -[2023-09-12 09:16:15,128][inference][INFO] - + Forward pass throughput: 323.00 (samples/s) -[2023-09-12 09:16:15,128][inference][INFO] - Saving inference results -[2023-09-12 09:16:15,139][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-11_21:03:26_5af2c6269672cda01c24ad48fab13f14a3ffb746/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-11_21:03:26_5af2c6269672cda01c24ad48fab13f14a3ffb746/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index 344ec2f935a31a19a62e706f12f4bb8e09c4c386..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-11_21:03:26_5af2c6269672cda01c24ad48fab13f14a3ffb746/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-11_21:03:26_5af2c6269672cda01c24ad48fab13f14a3ffb746/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-11_21:03:26_5af2c6269672cda01c24ad48fab13f14a3ffb746/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 7195eb9e75504f284e18c0af0e889f720013eb99..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-11_21:03:26_5af2c6269672cda01c24ad48fab13f14a3ffb746/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - 
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: main - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-11_21:03:26_5af2c6269672cda01c24ad48fab13f14a3ffb746/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-11_21:03:26_5af2c6269672cda01c24ad48fab13f14a3ffb746/pytorch_bert_inference/1/.config/overrides.yaml 
b/raw_results/2023-09-11_21:03:26_5af2c6269672cda01c24ad48fab13f14a3ffb746/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-11_21:03:26_5af2c6269672cda01c24ad48fab13f14a3ffb746/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-11_21:03:26_5af2c6269672cda01c24ad48fab13f14a3ffb746/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-11_21:03:26_5af2c6269672cda01c24ad48fab13f14a3ffb746/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 9af0e66383a2862a3d036ea4917e3bacabb881ae..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-11_21:03:26_5af2c6269672cda01c24ad48fab13f14a3ffb746/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-11_21:03:26_5af2c6269672cda01c24ad48fab13f14a3ffb746/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-11_21:03:26_5af2c6269672cda01c24ad48fab13f14a3ffb746/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 6bbdc4982371fc9b1795ced575dda1ec0c3cd0c4..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-11_21:03:26_5af2c6269672cda01c24ad48fab13f14a3ffb746/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,473.65324799999996,0.00349,1150.0 diff --git a/raw_results/2023-09-11_21:03:26_5af2c6269672cda01c24ad48fab13f14a3ffb746/pytorch_bert_inference/1/main.log b/raw_results/2023-09-11_21:03:26_5af2c6269672cda01c24ad48fab13f14a3ffb746/pytorch_bert_inference/1/main.log deleted file mode 100644 index cba10f5677a08bea588241d564caf35975f4e9aa..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-09-11_21:03:26_5af2c6269672cda01c24ad48fab13f14a3ffb746/pytorch_bert_inference/1/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-09-12 09:16:15,518][benchmark][INFO] - Configuring inference benchmark -[2023-09-12 09:16:15,519][benchmark][INFO] - + Setting seed(42) -[2023-09-12 09:16:15,969][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-12 09:16:15,970][backend][INFO] - Configuring pytorch backend -[2023-09-12 09:16:15,970][backend][INFO] - + Checking initial device isolation -[2023-09-12 09:16:15,970][backend][INFO] - + Checking contineous device isolation -[2023-09-12 09:16:15,970][pytorch][INFO] - + Disabling gradients -[2023-09-12 09:16:15,970][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-12 09:16:16,091][pytorch][INFO] - + Turning on eval mode -[2023-09-12 09:16:16,091][inference][INFO] - Running inference benchmark -[2023-09-12 09:16:16,212][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-12 09:16:16,213][inference][INFO] - + Tracking forward pass peak memory -[2023-09-12 09:16:16,259][inference][INFO] - + Forward pass peak memory: 473.65324799999996 (MB) -[2023-09-12 09:16:16,260][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-09-12 09:16:16,262][inference][INFO] - + Warming up the forward pass -[2023-09-12 09:16:16,313][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-12 09:16:21,359][inference][INFO] - + Forward pass latency: 3.49e-03 (s) -[2023-09-12 09:16:21,360][inference][INFO] - + Forward pass throughput: 1150.00 (samples/s) -[2023-09-12 09:16:21,360][inference][INFO] - Saving inference results -[2023-09-12 09:16:21,368][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-11_21:03:26_5af2c6269672cda01c24ad48fab13f14a3ffb746/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-11_21:03:26_5af2c6269672cda01c24ad48fab13f14a3ffb746/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index 7e32a35f367f532a16d65c94e459c4f30c9188c8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-11_21:03:26_5af2c6269672cda01c24ad48fab13f14a3ffb746/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference 
-model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-11_21:03:26_5af2c6269672cda01c24ad48fab13f14a3ffb746/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-11_21:03:26_5af2c6269672cda01c24ad48fab13f14a3ffb746/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index d579b112a87b0f7897e7ef70dd45789a959d3a78..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-11_21:03:26_5af2c6269672cda01c24ad48fab13f14a3ffb746/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: main - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-11_21:03:26_5af2c6269672cda01c24ad48fab13f14a3ffb746/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-11_21:03:26_5af2c6269672cda01c24ad48fab13f14a3ffb746/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-09-11_21:03:26_5af2c6269672cda01c24ad48fab13f14a3ffb746/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-11_21:03:26_5af2c6269672cda01c24ad48fab13f14a3ffb746/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-11_21:03:26_5af2c6269672cda01c24ad48fab13f14a3ffb746/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-11_21:03:26_5af2c6269672cda01c24ad48fab13f14a3ffb746/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 5cf5f1ff54ffd6523449b409066f7697ce8772db..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-11_21:03:26_5af2c6269672cda01c24ad48fab13f14a3ffb746/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - 
delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 5 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-09-11_21:03:26_5af2c6269672cda01c24ad48fab13f14a3ffb746/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-11_21:03:26_5af2c6269672cda01c24ad48fab13f14a3ffb746/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index fab1f52ee610d86eca0f23ccc0f521840c814193..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-11_21:03:26_5af2c6269672cda01c24ad48fab13f14a3ffb746/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,472.2688,0.00321,312.0,0.482,207.0 diff --git a/raw_results/2023-09-11_21:03:26_5af2c6269672cda01c24ad48fab13f14a3ffb746/pytorch_gpt2_inference/0/main.log b/raw_results/2023-09-11_21:03:26_5af2c6269672cda01c24ad48fab13f14a3ffb746/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index c963f7cd50589de8148a420ecac72a1491de2239..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-11_21:03:26_5af2c6269672cda01c24ad48fab13f14a3ffb746/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-09-12 09:16:26,154][benchmark][INFO] - Configuring inference benchmark -[2023-09-12 09:16:26,156][benchmark][INFO] - + Setting seed(42) -[2023-09-12 09:16:27,656][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-12 09:16:27,656][backend][INFO] - Configuring pytorch backend -[2023-09-12 09:16:27,656][backend][INFO] - + Checking initial device isolation -[2023-09-12 09:16:27,657][backend][INFO] - + Checking contineous device isolation -[2023-09-12 09:16:27,657][pytorch][INFO] - + Disabling gradients -[2023-09-12 09:16:27,657][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-09-12 09:16:28,279][pytorch][INFO] - + Turning on eval mode -[2023-09-12 09:16:28,280][inference][INFO] - Running inference benchmark -[2023-09-12 09:16:28,468][inference][INFO] - + Tracking forward pass peak memory -[2023-09-12 09:16:28,672][inference][INFO] - + Forward pass peak memory: 472.2688 (MB) -[2023-09-12 09:16:28,674][inference][INFO] - + Warming up the forward pass 
-[2023-09-12 09:16:28,707][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-12 09:16:33,756][inference][INFO] - + Forward pass latency: 3.21e-03 (s) -[2023-09-12 09:16:33,757][inference][INFO] - + Forward pass throughput: 312.00 (samples/s) -[2023-09-12 09:16:33,758][inference][INFO] - + Warming up the generation pass -[2023-09-12 09:16:34,251][inference][INFO] - + Tracking generation latency and throughput -[2023-09-12 09:16:39,553][inference][INFO] - + Generation pass latency: 4.82e-01 (s) -[2023-09-12 09:16:39,554][inference][INFO] - + Generation pass throughput: 207.00 (tokens/s) -[2023-09-12 09:16:39,554][inference][INFO] - Saving inference results -[2023-09-12 09:16:39,578][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/0/.config/config.yaml b/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/0/.config/config.yaml deleted file mode 100644 index 833d05497b59ba26a18c6a302d0bb91fe2b458ce..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/0/.config/config.yaml +++ /dev/null @@ -1,75 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float16 - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: training - _target_: optimum_benchmark.benchmarks.training.benchmark.TrainingBenchmark - warmup_steps: 40 - dataset_shapes: - dataset_size: 2000 - sequence_length: 273 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - training_arguments: - skip_memory_metrics: true - output_dir: ./trainer_output - use_cpu: ${is_cpu:${device}} - ddp_find_unused_parameters: false - do_train: true - do_eval: false - do_predict: false - report_to: none - per_device_train_batch_size: 16 -experiment_name: bert_1gpu_training -model: bert-base-uncased -device: cuda -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/0/.config/hydra.yaml b/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/0/.config/hydra.yaml deleted file mode 100644 index 795072c00dec3026830937d03be52bdbe5925e45..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/0/.config/hydra.yaml +++ /dev/null @@ -1,174 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - +benchmark.training_arguments.per_device_train_batch_size: 16,32 - backend.torch_dtype: float16,float32 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - +benchmark.training_arguments.per_device_train_batch_size=16 - - backend.torch_dtype=float16 - job: - name: experiment - chdir: true - override_dirname: +benchmark.training_arguments.per_device_train_batch_size=16,backend.torch_dtype=float16 - id: '0' - num: 0 - config_name: bert_1gpu_training - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: 
/home/user/transformers-regression/sweeps/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/0 - choices: - benchmark: training - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/0/.config/overrides.yaml b/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/0/.config/overrides.yaml deleted file mode 100644 index 8d6d8f16f84a50b1a79c5e37697d3e62129d7306..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/0/.config/overrides.yaml +++ /dev/null @@ -1,2 +0,0 @@ -- +benchmark.training_arguments.per_device_train_batch_size=16 -- backend.torch_dtype=float16 diff --git a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/0/experiment.log b/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/0/experiment.log deleted file mode 100644 index 95f6372c792238dcf67619d5f5fd927ee92fbcda..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/0/experiment.log +++ /dev/null @@ -1,17 +0,0 @@ -[2023-09-20 15:11:07,454][experiment][WARNING] - Multiple GPUs detected but CUDA_DEVICE_ORDER is not set. This means that code might allocate resources from the wrong GPUs even if CUDA_VISIBLE_DEVICES is set. Pytorch uses the `FASTEST_FIRST` order by default, which is not guaranteed to be the same as nvidia-smi. `CUDA_DEVICE_ORDER` will be set to `PCI_BUS_ID` to ensure that the GPUs are allocated in the same order as nvidia-smi. -[2023-09-20 15:11:11,376][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-20 15:11:11,376][backend][INFO] - Configuring pytorch backend -[2023-09-20 15:11:11,379][backend][INFO] - + Checking initial device(s) isolation of CUDA device(s): [0] -[2023-09-20 15:11:11,507][backend][INFO] - + Checking continuous device(s) isolation of CUDA device(s): [0] -[2023-09-20 15:11:11,521][pytorch][INFO] - + Loading model on device: cuda -[2023-09-20 15:11:20,233][benchmark][INFO] - Configuring training benchmark -[2023-09-20 15:11:20,234][training][INFO] - Running training benchmark -[2023-09-20 15:11:20,234][dataset_generator][INFO] - Using text-classification task generator -[2023-09-20 15:11:20,295][pytorch][INFO] - + Setting dataset format to `torch`. 
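The steps this log walks through next (setting the dataset format, wrapping transformers.TrainingArguments, wrapping transformers.Trainer) amount to an ordinary Trainer setup. A hedged sketch under job 0's dataset_shapes and training_arguments; the synthetic dataset is an illustrative stand-in for the text-classification task generator, not its actual implementation, and loading the model in float16 via .half() mirrors backend.torch_dtype=float16 rather than any TrainingArguments flag:

    import torch
    from datasets import Dataset
    from transformers import AutoModelForSequenceClassification, Trainer, TrainingArguments

    # Illustrative stand-in for the task generator, using dataset_shapes above.
    dataset_size, sequence_length = 2000, 273
    ds = Dataset.from_dict({
        "input_ids": torch.randint(0, 30522, (dataset_size, sequence_length)).tolist(),
        "attention_mask": [[1] * sequence_length] * dataset_size,
        "labels": [0] * dataset_size,
    })
    ds.set_format("torch")  # the "Setting dataset format to `torch`" step

    model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased").half().cuda()
    args = TrainingArguments(
        output_dir="./trainer_output",
        per_device_train_batch_size=16,  # job 0 override
        skip_memory_metrics=True,
        do_train=True,
        do_eval=False,
        report_to="none",
    )
    Trainer(model=model, args=args, train_dataset=ds).train()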
-[2023-09-20 15:11:20,295][pytorch][INFO] - + Wrapping training arguments with transformers.TrainingArguments -[2023-09-20 15:11:20,297][pytorch][INFO] - + Wrapping model with transformers.Trainer -[2023-09-20 15:11:20,301][pytorch][INFO] - + Starting training -[2023-09-20 15:11:55,804][pytorch][INFO] - + Training finished successfully -[2023-09-20 15:11:55,805][training][INFO] - Saving training results -[2023-09-20 15:11:55,808][backend][INFO] - Cleaning pytorch backend -[2023-09-20 15:11:55,808][backend][INFO] - + Deleting pretrained model diff --git a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/0/hydra_config.yaml b/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/0/hydra_config.yaml deleted file mode 100644 index a75f0da9b0aa0151767b6e6858f2f15bc4de54c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/0/hydra_config.yaml +++ /dev/null @@ -1,75 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float16 - disable_grad: false - eval_mode: false - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: training - _target_: optimum_benchmark.benchmarks.training.benchmark.TrainingBenchmark - warmup_steps: 40 - dataset_shapes: - dataset_size: 2000 - sequence_length: 273 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - training_arguments: - skip_memory_metrics: true - output_dir: ./trainer_output - use_cpu: false - ddp_find_unused_parameters: false - do_train: true - do_eval: false - do_predict: false - report_to: none - per_device_train_batch_size: 16 -experiment_name: bert_1gpu_training -model: bert-base-uncased -device: cuda -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/0/training_results.csv b/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/0/training_results.csv deleted file mode 100644 index 0f9817db669ac863de4b8dd96eed6f435147c084..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/0/training_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,warmup.runtime(s),warmup.throughput(samples/s),training.runtime(s),training.throughput(samples/s),overall_training.runtime(s),overall_training.throughput(samples/s)
-0,4.290498971939087,149.16679952279597,31.10353970527649,172.3276530835079,35.39404010772705,151.43792524634202 diff --git a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/1/.config/config.yaml b/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/1/.config/config.yaml deleted file mode 100644 index 8d5c88f6241de713f22a4fbaee0c5bb7f23f5dfa..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/1/.config/config.yaml +++ /dev/null @@ -1,75 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float32 - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: training - _target_: optimum_benchmark.benchmarks.training.benchmark.TrainingBenchmark - warmup_steps: 40 - dataset_shapes: - dataset_size: 2000 - sequence_length: 273 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - training_arguments: - skip_memory_metrics: true - output_dir: ./trainer_output - use_cpu: ${is_cpu:${device}} - ddp_find_unused_parameters: false - do_train: true - do_eval: false - do_predict: false - report_to: none - per_device_train_batch_size: 16 -experiment_name: bert_1gpu_training -model: bert-base-uncased -device: cuda -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/1/.config/hydra.yaml b/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/1/.config/hydra.yaml deleted file mode 100644 index fbb811cda171f1c97ac992368f72453abc4ac6d1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/1/.config/hydra.yaml +++ /dev/null @@ -1,174 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - +benchmark.training_arguments.per_device_train_batch_size: 16,32 - backend.torch_dtype: float16,float32 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered 
by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - +benchmark.training_arguments.per_device_train_batch_size=16 - - backend.torch_dtype=float32 - job: - name: experiment - chdir: true - override_dirname: +benchmark.training_arguments.per_device_train_batch_size=16,backend.torch_dtype=float32 - id: '1' - num: 1 - config_name: bert_1gpu_training - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/1 - choices: - benchmark: training - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/1/.config/overrides.yaml b/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/1/.config/overrides.yaml deleted file mode 100644 index 
ec1fe39ea16e445c5d7092a8b3e071d9b0c55522..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/1/.config/overrides.yaml +++ /dev/null @@ -1,2 +0,0 @@ -- +benchmark.training_arguments.per_device_train_batch_size=16 -- backend.torch_dtype=float32 diff --git a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/1/experiment.log b/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/1/experiment.log deleted file mode 100644 index e762474c37c4b2105998996af79dccbf19a6e6bc..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/1/experiment.log +++ /dev/null @@ -1,16 +0,0 @@ -[2023-09-20 15:11:57,419][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-20 15:11:57,419][backend][INFO] - Configuring pytorch backend -[2023-09-20 15:11:57,420][backend][INFO] - + Checking initial device(s) isolation of CUDA device(s): [0] -[2023-09-20 15:11:57,541][backend][INFO] - + Checking continuous device(s) isolation of CUDA device(s): [0] -[2023-09-20 15:11:57,557][pytorch][INFO] - + Loading model on device: cuda -[2023-09-20 15:11:58,060][benchmark][INFO] - Configuring training benchmark -[2023-09-20 15:11:58,061][training][INFO] - Running training benchmark -[2023-09-20 15:11:58,061][dataset_generator][INFO] - Using text-classification task generator -[2023-09-20 15:11:58,101][pytorch][INFO] - + Setting dataset format to `torch`. -[2023-09-20 15:11:58,102][pytorch][INFO] - + Wrapping training arguments with transformers.TrainingArguments -[2023-09-20 15:11:58,102][pytorch][INFO] - + Wrapping model with transformers.Trainer -[2023-09-20 15:11:58,107][pytorch][INFO] - + Starting training -[2023-09-20 15:13:04,107][pytorch][INFO] - + Training finished successfully -[2023-09-20 15:13:04,108][training][INFO] - Saving training results -[2023-09-20 15:13:04,109][backend][INFO] - Cleaning pytorch backend -[2023-09-20 15:13:04,110][backend][INFO] - + Deleting pretrained model diff --git a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/1/hydra_config.yaml b/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/1/hydra_config.yaml deleted file mode 100644 index abcce5eda3bfbb21561ba578be643fe874f4ee2c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/1/hydra_config.yaml +++ /dev/null @@ -1,75 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float32 - disable_grad: false - eval_mode: false - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: training - _target_: optimum_benchmark.benchmarks.training.benchmark.TrainingBenchmark - warmup_steps: 40 - dataset_shapes: - dataset_size: 2000 - sequence_length: 273 - num_choices: 1 - 
feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - training_arguments: - skip_memory_metrics: true - output_dir: ./trainer_output - use_cpu: false - ddp_find_unused_parameters: false - do_train: true - do_eval: false - do_predict: false - report_to: none - per_device_train_batch_size: 16 -experiment_name: bert_1gpu_training -model: bert-base-uncased -device: cuda -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/1/training_results.csv b/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/1/training_results.csv deleted file mode 100644 index 9c3089463e33ed56c44a24a29d1d18f7e975ad0f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/1/training_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,warmup.runtime(s),warmup.throughput(samples/s),training.runtime(s),training.throughput(samples/s),overall_training.runtime(s),overall_training.throughput(samples/s) -0,7.094875812530518,90.20594819569256,58.791839599609375,91.1691152463209,65.88671660423279,81.35175459108635 diff --git a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/2/.config/config.yaml b/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/2/.config/config.yaml deleted file mode 100644 index e7ef28c956bde8dedcb111288d0157eca31b1c62..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/2/.config/config.yaml +++ /dev/null @@ -1,75 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float16 - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: training - _target_: optimum_benchmark.benchmarks.training.benchmark.TrainingBenchmark - warmup_steps: 40 - dataset_shapes: - dataset_size: 2000 - sequence_length: 273 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - training_arguments: - skip_memory_metrics: true - output_dir: ./trainer_output - use_cpu: ${is_cpu:${device}} - ddp_find_unused_parameters: false - do_train: true - do_eval: false - do_predict: false - report_to: none - per_device_train_batch_size: 32 -experiment_name: bert_1gpu_training -model: bert-base-uncased -device: cuda -task: text-classification
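Jobs 0 and 1 of this sweep differ only in backend.torch_dtype at per_device_train_batch_size=16, so their training_results.csv rows give a direct read on the float16 speedup on the MI210:

    # Speedup implied by the two training.throughput(samples/s) values above.
    fp16 = 172.3276530835079  # job 0: torch_dtype=float16
    fp32 = 91.1691152463209   # job 1: torch_dtype=float32
    print(f"fp16 is {fp16 / fp32:.2f}x faster")  # ~1.89x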
-hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/2/.config/hydra.yaml b/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/2/.config/hydra.yaml deleted file mode 100644 index 1efcc5c62b844664c93858685a3b8e038bcc63af..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/2/.config/hydra.yaml +++ /dev/null @@ -1,174 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - +benchmark.training_arguments.per_device_train_batch_size: 16,32 - backend.torch_dtype: float16,float32 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
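The sweeper params block near the top of this hydra.yaml is what fans the experiment out into jobs 0-3: Hydra's BasicSweeper takes the Cartesian product of the comma-separated values, with the first override varying slowest. An illustrative expansion of that product:

    from itertools import product

    # How the params block maps to job numbers 0..3 (matches the overrides.yaml
    # files under sweeps .../bert_1gpu_training/0..3).
    batch_sizes = [16, 32]           # per_device_train_batch_size
    dtypes = ["float16", "float32"]  # backend.torch_dtype
    for num, (bs, dtype) in enumerate(product(batch_sizes, dtypes)):
        print(num, f"per_device_train_batch_size={bs}", f"torch_dtype={dtype}")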
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - +benchmark.training_arguments.per_device_train_batch_size=32 - - backend.torch_dtype=float16 - job: - name: experiment - chdir: true - override_dirname: +benchmark.training_arguments.per_device_train_batch_size=32,backend.torch_dtype=float16 - id: '2' - num: 2 - config_name: bert_1gpu_training - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/2 - choices: - benchmark: training - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/2/.config/overrides.yaml b/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/2/.config/overrides.yaml deleted file mode 100644 index d5e7aac495a5978edf9851ed4029a4f4f4de707b..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/2/.config/overrides.yaml +++ /dev/null @@ -1,2 +0,0 @@ -- +benchmark.training_arguments.per_device_train_batch_size=32 -- backend.torch_dtype=float16 diff --git a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/2/experiment.log b/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/2/experiment.log deleted file mode 100644 index 962fe013027f7d2a82f6794233f2f039ca814347..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/2/experiment.log +++ /dev/null @@ -1,16 +0,0 @@ 
-[2023-09-20 15:13:05,776][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-20 15:13:05,776][backend][INFO] - Configuring pytorch backend -[2023-09-20 15:13:05,777][backend][INFO] - + Checking initial device(s) isolation of CUDA device(s): [0] -[2023-09-20 15:13:05,898][backend][INFO] - + Checking continuous device(s) isolation of CUDA device(s): [0] -[2023-09-20 15:13:05,915][pytorch][INFO] - + Loading model on device: cuda -[2023-09-20 15:13:06,469][benchmark][INFO] - Configuring training benchmark -[2023-09-20 15:13:06,469][training][INFO] - Running training benchmark -[2023-09-20 15:13:06,469][dataset_generator][INFO] - Using text-classification task generator -[2023-09-20 15:13:06,507][pytorch][INFO] - + Setting dataset format to `torch`. -[2023-09-20 15:13:06,508][pytorch][INFO] - + Wrapping training arguments with transformers.TrainingArguments -[2023-09-20 15:13:06,509][pytorch][INFO] - + Wrapping model with transformers.Trainer -[2023-09-20 15:13:06,514][pytorch][INFO] - + Starting training -[2023-09-20 15:13:40,062][pytorch][INFO] - + Training finished successfully -[2023-09-20 15:13:40,062][training][INFO] - Saving training results -[2023-09-20 15:13:40,064][backend][INFO] - Cleaning pytorch backend -[2023-09-20 15:13:40,064][backend][INFO] - + Deleting pretrained model diff --git a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/2/hydra_config.yaml b/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/2/hydra_config.yaml deleted file mode 100644 index af5e494ca3514c36d5e090f1af921bd6e849a818..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/2/hydra_config.yaml +++ /dev/null @@ -1,75 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float16 - disable_grad: false - eval_mode: false - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: training - _target_: optimum_benchmark.benchmarks.training.benchmark.TrainingBenchmark - warmup_steps: 40 - dataset_shapes: - dataset_size: 2000 - sequence_length: 273 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - training_arguments: - skip_memory_metrics: true - output_dir: ./trainer_output - use_cpu: false - ddp_find_unused_parameters: false - do_train: true - do_eval: false - do_predict: false - report_to: none - per_device_train_batch_size: 32 -experiment_name: bert_1gpu_training -model: bert-base-uncased -device: cuda -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct 
MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/2/training_results.csv b/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/2/training_results.csv deleted file mode 100644 index db3594615550e3a271b7f26d2767fecdf50ae08f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/2/training_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,warmup.runtime(s),warmup.throughput(samples/s),training.runtime(s),training.throughput(samples/s),overall_training.runtime(s),overall_training.throughput(samples/s) -0,7.056463956832886,181.39396839978977,26.38365411758423,180.71795433454437,33.440120220184326,142.58321945631207 diff --git a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/3/.config/config.yaml b/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/3/.config/config.yaml deleted file mode 100644 index bf1190e8785c39d372b5122d20df36f210ecf85f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/3/.config/config.yaml +++ /dev/null @@ -1,75 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float32 - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: training - _target_: optimum_benchmark.benchmarks.training.benchmark.TrainingBenchmark - warmup_steps: 40 - dataset_shapes: - dataset_size: 2000 - sequence_length: 273 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - training_arguments: - skip_memory_metrics: true - output_dir: ./trainer_output - use_cpu: ${is_cpu:${device}} - ddp_find_unused_parameters: false - do_train: true - do_eval: false - do_predict: false - report_to: none - per_device_train_batch_size: 32 -experiment_name: bert_1gpu_training -model: bert-base-uncased -device: cuda -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/3/.config/hydra.yaml b/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/3/.config/hydra.yaml deleted file mode 100644 index
de7e56e0d781173e39ecd6ed2a0d90c9815d09f4..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/3/.config/hydra.yaml +++ /dev/null @@ -1,174 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - +benchmark.training_arguments.per_device_train_batch_size: 16,32 - backend.torch_dtype: float16,float32 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - +benchmark.training_arguments.per_device_train_batch_size=32 - - backend.torch_dtype=float32 - job: - name: experiment - chdir: true - override_dirname: +benchmark.training_arguments.per_device_train_batch_size=32,backend.torch_dtype=float32 - id: '3' - num: 3 - config_name: bert_1gpu_training - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: 
command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/3 - choices: - benchmark: training - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/3/.config/overrides.yaml b/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/3/.config/overrides.yaml deleted file mode 100644 index f92a32ab90ab68f9e88427057fcb5ef3cbec936b..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/3/.config/overrides.yaml +++ /dev/null @@ -1,2 +0,0 @@ -- +benchmark.training_arguments.per_device_train_batch_size=32 -- backend.torch_dtype=float32 diff --git a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/3/experiment.log b/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/3/experiment.log deleted file mode 100644 index d695adcfa3e18baa74811fac42a8054940312272..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/3/experiment.log +++ /dev/null @@ -1,16 +0,0 @@ -[2023-09-20 15:13:41,685][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-20 15:13:41,686][backend][INFO] - Configuring pytorch backend -[2023-09-20 15:13:41,686][backend][INFO] - + Checking initial device(s) isolation of CUDA device(s): [0] -[2023-09-20 15:13:41,809][backend][INFO] - + Checking continuous device(s) isolation of CUDA device(s): [0] -[2023-09-20 15:13:41,825][pytorch][INFO] - + Loading model on device: cuda -[2023-09-20 15:13:42,384][benchmark][INFO] - Configuring training benchmark -[2023-09-20 15:13:42,384][training][INFO] - Running training benchmark -[2023-09-20 15:13:42,384][dataset_generator][INFO] - Using text-classification task generator -[2023-09-20 15:13:42,427][pytorch][INFO] - + Setting dataset format to `torch`. 
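Throughout these runs, hydra_config.yaml is just .config/config.yaml with the ${...} interpolations evaluated: ${pytorch_version:} becomes 2.1.0+rocm5.6, and ${is_inference:training} / ${is_cpu:cuda} both become false. A rough sketch with OmegaConf; the resolver bodies here are illustrative guesses at what optimum_benchmark registers, not its actual code:

    import torch
    from omegaconf import OmegaConf

    # Guessed stand-ins for optimum_benchmark's custom resolvers.
    OmegaConf.register_new_resolver("pytorch_version", lambda *_: torch.__version__)
    OmegaConf.register_new_resolver("is_inference", lambda name: name == "inference")
    OmegaConf.register_new_resolver("is_cpu", lambda device: device == "cpu")

    cfg = OmegaConf.load(".config/config.yaml")
    print(OmegaConf.to_yaml(cfg, resolve=True))  # resolved, like hydra_config.yaml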
-[2023-09-20 15:13:42,427][pytorch][INFO] - + Wrapping training arguments with transformers.TrainingArguments -[2023-09-20 15:13:42,428][pytorch][INFO] - + Wrapping model with transformers.Trainer -[2023-09-20 15:13:42,433][pytorch][INFO] - + Starting training -[2023-09-20 15:14:47,872][pytorch][INFO] - + Training finished successfully -[2023-09-20 15:14:47,873][training][INFO] - Saving training results -[2023-09-20 15:14:47,875][backend][INFO] - Cleaning pytorch backend -[2023-09-20 15:14:47,875][backend][INFO] - + Deleting pretrained model diff --git a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/3/hydra_config.yaml b/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/3/hydra_config.yaml deleted file mode 100644 index f53a28377945009d7294ad9edd5f274d64fe05f6..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/3/hydra_config.yaml +++ /dev/null @@ -1,75 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float32 - disable_grad: false - eval_mode: false - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: training - _target_: optimum_benchmark.benchmarks.training.benchmark.TrainingBenchmark - warmup_steps: 40 - dataset_shapes: - dataset_size: 2000 - sequence_length: 273 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - training_arguments: - skip_memory_metrics: true - output_dir: ./trainer_output - use_cpu: false - ddp_find_unused_parameters: false - do_train: true - do_eval: false - do_predict: false - report_to: none - per_device_train_batch_size: 32 -experiment_name: bert_1gpu_training -model: bert-base-uncased -device: cuda -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/3/training_results.csv b/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/3/training_results.csv deleted file mode 100644 index ee536de2ac0626b4bc152881c77a582a738505a9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/bert_1gpu_training/3/training_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,warmup.runtime(s),warmup.throughput(samples/s),training.runtime(s),training.throughput(samples/s),overall_training.runtime(s),overall_training.throughput(samples/s)
-0,14.335822343826294,89.28682075578529,50.9871141910553,93.51382355419624,65.32293772697449,72.99120593639651 diff --git a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/0/.config/config.yaml b/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/0/.config/config.yaml deleted file mode 100644 index 31586216610ff3c8e9b7efe6d31ed5af08de26e0..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/0/.config/config.yaml +++ /dev/null @@ -1,73 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float16 - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 15 - warmup_runs: 10 - memory: false - energy: false - input_shapes: - batch_size: 1 - sequence_length: 200 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 - can_diffuse: ${can_diffuse:${task}} - can_generate: ${can_generate:${task}} - forward_kwargs: {} - generate_kwargs: {} -experiment_name: llama_1gpu_inference -model: fxmarty/tiny-llama-fast-tokenizer -device: cuda -task: ${infer_task:${model}} -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/0/.config/hydra.yaml b/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/0/.config/hydra.yaml deleted file mode 100644 index be215beba88b268b46225211d8938c02ef709ab1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,174 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,16 - backend.torch_dtype: float16,float32 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. 
- - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - - backend.torch_dtype=float16 - job: - name: experiment - chdir: true - override_dirname: backend.torch_dtype=float16,benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: llama2_1gpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/0/.config/overrides.yaml b/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/0/.config/overrides.yaml deleted file mode 100644 index b7fc5900f179157ac0449016dd8d9497ffd58db4..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/0/.config/overrides.yaml +++ /dev/null @@ -1,2 +0,0 @@ -- benchmark.input_shapes.batch_size=1 -- backend.torch_dtype=float16 diff --git a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/0/experiment.log b/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/0/experiment.log deleted file mode 100644 index 90e12a0f22295a70093b749b95bbeb0faa31d634..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/0/experiment.log +++ /dev/null @@ -1,27 +0,0 @@ -[2023-09-20 15:15:32,702][inference][INFO] - `new_tokens` was set to 200. `max_new_tokens` and `min_new_tokens` will be set to 200. -[2023-09-20 15:15:32,851][experiment][WARNING] - Multiple GPUs detected but CUDA_DEVICE_ORDER is not set. This means that code might allocate resources from the wrong GPUs even if CUDA_VISIBLE_DEVICES is set. Pytorch uses the `FASTEST_FIRST` order by default, which is not guaranteed to be the same as nvidia-smi. `CUDA_DEVICE_ORDER` will be set to `PCI_BUS_ID` to ensure that the GPUs are allocated in the same order as nvidia-smi. -[2023-09-20 15:15:36,880][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type llama -[2023-09-20 15:15:36,880][backend][INFO] - Configuring pytorch backend -[2023-09-20 15:15:36,882][backend][INFO] - + Checking initial device(s) isolation of CUDA device(s): [0] -[2023-09-20 15:15:37,008][backend][INFO] - + Checking continuous device(s) isolation of CUDA device(s): [0] -[2023-09-20 15:15:37,023][pytorch][INFO] - + Disabling gradients -[2023-09-20 15:15:37,024][pytorch][INFO] - + Loading model on device: cuda -[2023-09-20 15:15:38,557][pytorch][INFO] - + Turning on model's eval mode -[2023-09-20 15:15:38,561][benchmark][INFO] - Configuring inference benchmark -[2023-09-20 15:15:38,561][inference][INFO] - Running inference benchmark -[2023-09-20 15:15:38,561][input_generator][INFO] - Using llama model type generator -[2023-09-20 15:15:38,583][inference][INFO] - + Preparing input for the forward pass -[2023-09-20 15:15:38,583][inference][INFO] - + Warming up the forward pass -[2023-09-20 15:15:38,903][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-20 15:15:38,903][latency_tracker][INFO] - Tracked Pytorch devices: [0] -[2023-09-20 15:15:54,204][inference][INFO] - + Forward pass latency: 3.30e-03 (s) -[2023-09-20 15:15:54,207][inference][INFO] - + Forward pass throughput: 303.00 (samples/s) -[2023-09-20 15:15:54,207][inference][INFO] - + Preparing input for the generation pass -[2023-09-20 15:15:54,207][inference][INFO] - + Warming up the generation pass -[2023-09-20 15:15:55,442][inference][INFO] - + Tracking generation latency and throughput -[2023-09-20 15:15:55,443][latency_tracker][INFO] - Tracked Pytorch devices: [0] -[2023-09-20 15:16:10,962][inference][INFO] - + Generation pass latency: 5.17e-01 (s) -[2023-09-20 15:16:10,962][inference][INFO] - + Generation pass throughput: 387.00 (tokens/s) -[2023-09-20 15:16:10,962][inference][INFO] - Saving inference results -[2023-09-20 15:16:10,971][backend][INFO] - Cleaning pytorch backend -[2023-09-20 15:16:10,971][backend][INFO] - + Deleting pretrained model diff --git a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/0/hydra_config.yaml 
b/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/0/hydra_config.yaml deleted file mode 100644 index 82eb70a083058b7ed98a1ae91483b49ea13d5d6d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/0/hydra_config.yaml +++ /dev/null @@ -1,79 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float16 - disable_grad: true - eval_mode: true - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 15 - warmup_runs: 10 - memory: false - energy: false - input_shapes: - batch_size: 1 - sequence_length: 200 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 - can_diffuse: false - can_generate: true - forward_kwargs: {} - generate_kwargs: - max_new_tokens: 200 - min_new_tokens: 200 - do_sample: false - use_cache: true - pad_token_id: 0 - num_beams: 1 -experiment_name: llama_1gpu_inference -model: fxmarty/tiny-llama-fast-tokenizer -device: cuda -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/0/inference_results.csv b/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/0/inference_results.csv deleted file mode 100644 index b27ebe8438761675d151f0953026b317cfeb0e3e..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,0.0033,303.0,0.517,387.0 diff --git a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/1/.config/config.yaml b/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/1/.config/config.yaml deleted file mode 100644 index 43b47967479b8340e846fdf11ba343f34adbfe5d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/1/.config/config.yaml +++ /dev/null @@ -1,73 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: 
true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float32 - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 15 - warmup_runs: 10 - memory: false - energy: false - input_shapes: - batch_size: 1 - sequence_length: 200 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 - can_diffuse: ${can_diffuse:${task}} - can_generate: ${can_generate:${task}} - forward_kwargs: {} - generate_kwargs: {} -experiment_name: llama_1gpu_inference -model: fxmarty/tiny-llama-fast-tokenizer -device: cuda -task: ${infer_task:${model}} -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/1/.config/hydra.yaml b/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/1/.config/hydra.yaml deleted file mode 100644 index f13203d5592533ac7c7132ff8ba71207df2df6da..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,174 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,16 - backend.torch_dtype: float16,float32 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
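For reference, the four llama_1gpu_inference job directories in this sweep (0 through 3) are the Cartesian product of the sweeper params above: batch_size 1,16 times torch_dtype float16,float32. A minimal Python sketch of that expansion, assuming the later key varies fastest (which matches the hydra.job.num values recorded in these configs); Hydra's actual BasicSweeper does more bookkeeping:

import itertools

# Grid taken verbatim from the sweeper params in the deleted hydra.yaml files.
batch_sizes = ["1", "16"]              # benchmark.input_shapes.batch_size
torch_dtypes = ["float16", "float32"]  # backend.torch_dtype

for num, (bs, dtype) in enumerate(itertools.product(batch_sizes, torch_dtypes)):
    # Each combination becomes one multirun job; its override list is what
    # ends up in sweeps/<date>_<sha>/llama_1gpu_inference/<num>/.config/overrides.yaml
    print(num, [f"benchmark.input_shapes.batch_size={bs}",
                f"backend.torch_dtype={dtype}"])
# 0 ['benchmark.input_shapes.batch_size=1', 'backend.torch_dtype=float16']
# 1 ['benchmark.input_shapes.batch_size=1', 'backend.torch_dtype=float32']
# 2 ['benchmark.input_shapes.batch_size=16', 'backend.torch_dtype=float16']
# 3 ['benchmark.input_shapes.batch_size=16', 'backend.torch_dtype=float32']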
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - - backend.torch_dtype=float32 - job: - name: experiment - chdir: true - override_dirname: backend.torch_dtype=float32,benchmark.input_shapes.batch_size=1 - id: '1' - num: 1 - config_name: llama2_1gpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/1/.config/overrides.yaml b/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/1/.config/overrides.yaml deleted file mode 100644 index 8b4741e9eb919dcf02db7f865f28d92490b262b0..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/1/.config/overrides.yaml +++ /dev/null @@ -1,2 +0,0 @@ -- benchmark.input_shapes.batch_size=1 -- backend.torch_dtype=float32 diff --git a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/1/experiment.log b/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/1/experiment.log deleted file mode 100644 index 4bd63b9e83156f62db97fc5b2ee8fbb81ddc6318..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/1/experiment.log +++ /dev/null @@ -1,26 +0,0 @@ -[2023-09-20 15:16:11,612][inference][INFO] - `new_tokens` was 
set to 200. `max_new_tokens` and `min_new_tokens` will be set to 200. -[2023-09-20 15:16:12,603][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type llama -[2023-09-20 15:16:12,603][backend][INFO] - Configuring pytorch backend -[2023-09-20 15:16:12,604][backend][INFO] - + Checking initial device(s) isolation of CUDA device(s): [0] -[2023-09-20 15:16:12,726][backend][INFO] - + Checking continuous device(s) isolation of CUDA device(s): [0] -[2023-09-20 15:16:12,742][pytorch][INFO] - + Disabling gradients -[2023-09-20 15:16:12,743][pytorch][INFO] - + Loading model on device: cuda -[2023-09-20 15:16:13,121][pytorch][INFO] - + Turning on model's eval mode -[2023-09-20 15:16:13,122][benchmark][INFO] - Configuring inference benchmark -[2023-09-20 15:16:13,122][inference][INFO] - Running inference benchmark -[2023-09-20 15:16:13,122][input_generator][INFO] - Using llama model type generator -[2023-09-20 15:16:13,123][inference][INFO] - + Preparing input for the forward pass -[2023-09-20 15:16:13,123][inference][INFO] - + Warming up the forward pass -[2023-09-20 15:16:13,251][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-20 15:16:13,252][latency_tracker][INFO] - Tracked Pytorch devices: [0] -[2023-09-20 15:16:28,813][inference][INFO] - + Forward pass latency: 2.58e-03 (s) -[2023-09-20 15:16:28,817][inference][INFO] - + Forward pass throughput: 388.00 (samples/s) -[2023-09-20 15:16:28,817][inference][INFO] - + Preparing input for the generation pass -[2023-09-20 15:16:28,817][inference][INFO] - + Warming up the generation pass -[2023-09-20 15:16:29,216][inference][INFO] - + Tracking generation latency and throughput -[2023-09-20 15:16:29,216][latency_tracker][INFO] - Tracked Pytorch devices: [0] -[2023-09-20 15:16:44,829][inference][INFO] - + Generation pass latency: 5.38e-01 (s) -[2023-09-20 15:16:44,830][inference][INFO] - + Generation pass throughput: 372.00 (tokens/s) -[2023-09-20 15:16:44,830][inference][INFO] - Saving inference results -[2023-09-20 15:16:44,838][backend][INFO] - Cleaning pytorch backend -[2023-09-20 15:16:44,838][backend][INFO] - + Deleting pretrained model diff --git a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/1/hydra_config.yaml b/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/1/hydra_config.yaml deleted file mode 100644 index 74699b4dde858de23b60cd15505dc3745c7fa948..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/1/hydra_config.yaml +++ /dev/null @@ -1,79 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float32 - disable_grad: true - eval_mode: true - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 15 - warmup_runs: 10 - memory: false - energy: false - input_shapes: - batch_size: 1 - sequence_length: 200 - 
num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 - can_diffuse: false - can_generate: true - forward_kwargs: {} - generate_kwargs: - max_new_tokens: 200 - min_new_tokens: 200 - do_sample: false - use_cache: true - pad_token_id: 0 - num_beams: 1 -experiment_name: llama_1gpu_inference -model: fxmarty/tiny-llama-fast-tokenizer -device: cuda -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/1/inference_results.csv b/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/1/inference_results.csv deleted file mode 100644 index 63afd9c056ba8c1dbc5f179f816dccba40c575cd..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,0.00258,388.0,0.538,372.0 diff --git a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/2/.config/config.yaml b/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/2/.config/config.yaml deleted file mode 100644 index 4977442d8bcc23fcf97d0e094e48173757e090dd..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/2/.config/config.yaml +++ /dev/null @@ -1,73 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float16 - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 15 - warmup_runs: 10 - memory: false - energy: false - input_shapes: - batch_size: 16 - sequence_length: 200 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 - can_diffuse: ${can_diffuse:${task}} - can_generate: ${can_generate:${task}} - forward_kwargs: {} - generate_kwargs: {} -experiment_name: llama_1gpu_inference -model: fxmarty/tiny-llama-fast-tokenizer -device: cuda -task: ${infer_task:${model}} -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - 
accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/2/.config/hydra.yaml b/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/2/.config/hydra.yaml deleted file mode 100644 index 924a02b99e0015feb4d17f61678b35a5350a2de6..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/2/.config/hydra.yaml +++ /dev/null @@ -1,174 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,16 - backend.torch_dtype: float16,float32 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
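The job_logging section that follows is an ordinary Python logging dictConfig. A minimal sketch of how such a config is applied, reduced here to the stdlib simple formatter; the colorlog formatter is wired in the same way through the '()' factory key but needs the third-party colorlog package:

import logging
import logging.config

LOGGING = {
    "version": 1,
    "disable_existing_loggers": False,
    "formatters": {
        # Same format string as the 'simple' formatter in the config below.
        "simple": {"format": "[%(asctime)s][%(name)s][%(levelname)s] - %(message)s"}
    },
    "handlers": {
        "console": {
            "class": "logging.StreamHandler",
            "formatter": "simple",
            "stream": "ext://sys.stdout",  # dictConfig external-object syntax
        }
    },
    "root": {"level": "INFO", "handlers": ["console"]},
}

logging.config.dictConfig(LOGGING)
logging.getLogger("inference").info("Running inference benchmark")
# [2023-09-20 ...][inference][INFO] - Running inference benchmark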
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=16 - - backend.torch_dtype=float16 - job: - name: experiment - chdir: true - override_dirname: backend.torch_dtype=float16,benchmark.input_shapes.batch_size=16 - id: '2' - num: 2 - config_name: llama2_1gpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/2 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/2/.config/overrides.yaml b/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/2/.config/overrides.yaml deleted file mode 100644 index 67f6580c5dd56716f1c23e4e90698198799b499f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/2/.config/overrides.yaml +++ /dev/null @@ -1,2 +0,0 @@ -- benchmark.input_shapes.batch_size=16 -- backend.torch_dtype=float16 diff --git a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/2/experiment.log b/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/2/experiment.log deleted file mode 100644 index c69217b224d7d7384494b740a458d2ae8b0a3762..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/2/experiment.log +++ /dev/null @@ -1,26 +0,0 @@ -[2023-09-20 15:16:45,473][inference][INFO] - `new_tokens` 
was set to 200. `max_new_tokens` and `min_new_tokens` will be set to 200. -[2023-09-20 15:16:46,629][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type llama -[2023-09-20 15:16:46,629][backend][INFO] - Configuring pytorch backend -[2023-09-20 15:16:46,630][backend][INFO] - + Checking initial device(s) isolation of CUDA device(s): [0] -[2023-09-20 15:16:46,753][backend][INFO] - + Checking continuous device(s) isolation of CUDA device(s): [0] -[2023-09-20 15:16:46,772][pytorch][INFO] - + Disabling gradients -[2023-09-20 15:16:46,773][pytorch][INFO] - + Loading model on device: cuda -[2023-09-20 15:16:47,203][pytorch][INFO] - + Turning on model's eval mode -[2023-09-20 15:16:47,203][benchmark][INFO] - Configuring inference benchmark -[2023-09-20 15:16:47,203][inference][INFO] - Running inference benchmark -[2023-09-20 15:16:47,203][input_generator][INFO] - Using llama model type generator -[2023-09-20 15:16:47,204][inference][INFO] - + Preparing input for the forward pass -[2023-09-20 15:16:47,204][inference][INFO] - + Warming up the forward pass -[2023-09-20 15:16:47,227][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-20 15:16:47,227][latency_tracker][INFO] - Tracked Pytorch devices: [0] -[2023-09-20 15:17:02,581][inference][INFO] - + Forward pass latency: 6.08e-03 (s) -[2023-09-20 15:17:02,583][inference][INFO] - + Forward pass throughput: 2630.00 (samples/s) -[2023-09-20 15:17:02,583][inference][INFO] - + Preparing input for the generation pass -[2023-09-20 15:17:02,583][inference][INFO] - + Warming up the generation pass -[2023-09-20 15:17:04,169][inference][INFO] - + Tracking generation latency and throughput -[2023-09-20 15:17:04,169][latency_tracker][INFO] - Tracked Pytorch devices: [0] -[2023-09-20 15:17:19,849][inference][INFO] - + Generation pass latency: 7.84e-01 (s) -[2023-09-20 15:17:19,850][inference][INFO] - + Generation pass throughput: 4080.00 (tokens/s) -[2023-09-20 15:17:19,850][inference][INFO] - Saving inference results -[2023-09-20 15:17:19,854][backend][INFO] - Cleaning pytorch backend -[2023-09-20 15:17:19,855][backend][INFO] - + Deleting pretrained model diff --git a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/2/hydra_config.yaml b/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/2/hydra_config.yaml deleted file mode 100644 index 46e7c942b1f98367f69538013206b4b487b0db82..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/2/hydra_config.yaml +++ /dev/null @@ -1,79 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float16 - disable_grad: true - eval_mode: true - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 15 - warmup_runs: 10 - memory: false - energy: false - input_shapes: - batch_size: 16 - sequence_length: 
200 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 - can_diffuse: false - can_generate: true - forward_kwargs: {} - generate_kwargs: - max_new_tokens: 200 - min_new_tokens: 200 - do_sample: false - use_cache: true - pad_token_id: 0 - num_beams: 1 -experiment_name: llama_1gpu_inference -model: fxmarty/tiny-llama-fast-tokenizer -device: cuda -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/2/inference_results.csv b/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/2/inference_results.csv deleted file mode 100644 index 5cc80b507f3625219a10d2a2d4b4c4413ce24870..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/2/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,0.00608,2630.0,0.784,4080.0 diff --git a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/3/.config/config.yaml b/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/3/.config/config.yaml deleted file mode 100644 index 75a2cec740a09f8fed297594d9998531f5ae4881..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/3/.config/config.yaml +++ /dev/null @@ -1,73 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float32 - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 15 - warmup_runs: 10 - memory: false - energy: false - input_shapes: - batch_size: 16 - sequence_length: 200 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 - can_diffuse: ${can_diffuse:${task}} - can_generate: ${can_generate:${task}} - forward_kwargs: {} - generate_kwargs: {} -experiment_name: llama_1gpu_inference -model: fxmarty/tiny-llama-fast-tokenizer -device: cuda -task: ${infer_task:${model}} -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - 
accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/3/.config/hydra.yaml b/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/3/.config/hydra.yaml deleted file mode 100644 index ce994a984352afaf2d4bd79f6d9e848c640030e3..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/3/.config/hydra.yaml +++ /dev/null @@ -1,174 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,16 - backend.torch_dtype: float16,float32 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
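The run and sweep dirs above are assembled by OmegaConf resolvers: ${oc.env:...} reads environment variables and ${experiment_name} references the root config. A small sketch of that resolution; the sweep_dir key and the exported values are illustrative stand-ins (the commit hash is the one these raw_results paths record):

import os
from omegaconf import OmegaConf

# Stand-in values for illustration; in the real runs they would be exported
# before Hydra resolves the config.
os.environ["COMMIT_DATE_GMT"] = "2023-09-20_14:18:49"
os.environ["COMMIT_SHA"] = "f29fe7458953dbf00addaf793d95ea1965bc8441"

cfg = OmegaConf.create(
    {
        "experiment_name": "llama_1gpu_inference",
        # Same interpolation pattern as hydra.sweep.dir in the deleted configs.
        "sweep_dir": "sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}",
    }
)
print(cfg.sweep_dir)
# sweeps/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference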
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=16 - - backend.torch_dtype=float32 - job: - name: experiment - chdir: true - override_dirname: backend.torch_dtype=float32,benchmark.input_shapes.batch_size=16 - id: '3' - num: 3 - config_name: llama2_1gpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/3 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/3/.config/overrides.yaml b/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/3/.config/overrides.yaml deleted file mode 100644 index 9d69bb0ed87b5cb93e4c4e5d5b820de170746b79..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/3/.config/overrides.yaml +++ /dev/null @@ -1,2 +0,0 @@ -- benchmark.input_shapes.batch_size=16 -- backend.torch_dtype=float32 diff --git a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/3/experiment.log b/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/3/experiment.log deleted file mode 100644 index e0bab06836b2cea4544303c8052fd0d58c538b01..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/3/experiment.log +++ /dev/null @@ -1,26 +0,0 @@ -[2023-09-20 15:17:20,557][inference][INFO] - `new_tokens` 
was set to 200. `max_new_tokens` and `min_new_tokens` will be set to 200. -[2023-09-20 15:17:21,525][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type llama -[2023-09-20 15:17:21,525][backend][INFO] - Configuring pytorch backend -[2023-09-20 15:17:21,526][backend][INFO] - + Checking initial device(s) isolation of CUDA device(s): [0] -[2023-09-20 15:17:21,649][backend][INFO] - + Checking continuous device(s) isolation of CUDA device(s): [0] -[2023-09-20 15:17:21,669][pytorch][INFO] - + Disabling gradients -[2023-09-20 15:17:21,670][pytorch][INFO] - + Loading model on device: cuda -[2023-09-20 15:17:22,045][pytorch][INFO] - + Turning on model's eval mode -[2023-09-20 15:17:22,045][benchmark][INFO] - Configuring inference benchmark -[2023-09-20 15:17:22,046][inference][INFO] - Running inference benchmark -[2023-09-20 15:17:22,046][input_generator][INFO] - Using llama model type generator -[2023-09-20 15:17:22,047][inference][INFO] - + Preparing input for the forward pass -[2023-09-20 15:17:22,047][inference][INFO] - + Warming up the forward pass -[2023-09-20 15:17:22,069][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-20 15:17:22,069][latency_tracker][INFO] - Tracked Pytorch devices: [0] -[2023-09-20 15:17:37,344][inference][INFO] - + Forward pass latency: 5.68e-03 (s) -[2023-09-20 15:17:37,346][inference][INFO] - + Forward pass throughput: 2820.00 (samples/s) -[2023-09-20 15:17:37,346][inference][INFO] - + Preparing input for the generation pass -[2023-09-20 15:17:37,346][inference][INFO] - + Warming up the generation pass -[2023-09-20 15:17:37,961][inference][INFO] - + Tracking generation latency and throughput -[2023-09-20 15:17:37,961][latency_tracker][INFO] - Tracked Pytorch devices: [0] -[2023-09-20 15:17:53,342][inference][INFO] - + Generation pass latency: 9.05e-01 (s) -[2023-09-20 15:17:53,342][inference][INFO] - + Generation pass throughput: 3540.00 (tokens/s) -[2023-09-20 15:17:53,342][inference][INFO] - Saving inference results -[2023-09-20 15:17:53,347][backend][INFO] - Cleaning pytorch backend -[2023-09-20 15:17:53,347][backend][INFO] - + Deleting pretrained model diff --git a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/3/hydra_config.yaml b/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/3/hydra_config.yaml deleted file mode 100644 index 2404fd246618d0489ff3ba123302854d0830756a..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/3/hydra_config.yaml +++ /dev/null @@ -1,79 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float32 - disable_grad: true - eval_mode: true - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 15 - warmup_runs: 10 - memory: false - energy: false - input_shapes: - batch_size: 16 - sequence_length: 
200 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 - can_diffuse: false - can_generate: true - forward_kwargs: {} - generate_kwargs: - max_new_tokens: 200 - min_new_tokens: 200 - do_sample: false - use_cache: true - pad_token_id: 0 - num_beams: 1 -experiment_name: llama_1gpu_inference -model: fxmarty/tiny-llama-fast-tokenizer -device: cuda -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/3/inference_results.csv b/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/3/inference_results.csv deleted file mode 100644 index b5128c6dc43c472ce7231f158dbddf5bba1e77dd..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/llama_1gpu_inference/3/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,0.00568,2820.0,0.905,3540.0 diff --git a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index b0c83089a2621f9950400d418a85b4376221df44..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,73 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 5 - warmup_runs: 10 - memory: true - energy: false - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: null - can_diffuse: ${can_diffuse:${task}} - can_generate: ${can_generate:${task}} - forward_kwargs: {} - generate_kwargs: {} -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - 
accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index e08f90f358e16adf0ff05f9cc868d65d45cb9da1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
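Each job directory deleted in these diffs carries a one-row inference_results.csv. A sketch, assuming pandas, of collecting them into a single comparison table; the job column is added for readability and is not part of the original files:

from pathlib import Path

import pandas as pd

# Sweep path as recorded in these diffs (pytorch_bert_inference jobs 0 and 1).
sweep = Path(
    "raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/pytorch_bert_inference"
)
frames = []
for csv_path in sorted(sweep.glob("*/inference_results.csv")):
    frame = pd.read_csv(csv_path, index_col=0)
    frame.insert(0, "job", csv_path.parent.name)  # added column, not in the CSVs
    frames.append(frame)

# One row per job: forward.latency(s), forward.throughput(samples/s),
# forward.peak_memory(MB)
print(pd.concat(frames, ignore_index=True))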
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: experiment - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/pytorch_bert_inference/0/experiment.log b/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/pytorch_bert_inference/0/experiment.log deleted file mode 100644 index 2e3bbf9d6832ddca739abc2249a48e181d2e7c92..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/pytorch_bert_inference/0/experiment.log +++ /dev/null @@ -1,18 +0,0 @@ -[2023-09-20 15:14:55,401][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and 
model_type bert -[2023-09-20 15:14:55,401][backend][INFO] - Configuring pytorch backend -[2023-09-20 15:14:55,404][pytorch][INFO] - + Disabling gradients -[2023-09-20 15:14:55,404][pytorch][INFO] - + Loading model on device: cpu -[2023-09-20 15:14:56,529][pytorch][INFO] - + Turning on model's eval mode -[2023-09-20 15:14:56,536][benchmark][INFO] - Configuring inference benchmark -[2023-09-20 15:14:56,536][inference][INFO] - Running inference benchmark -[2023-09-20 15:14:56,536][input_generator][INFO] - Using bert model type generator -[2023-09-20 15:14:56,537][inference][INFO] - + Preparing input for the forward pass -[2023-09-20 15:14:56,537][inference][INFO] - + Warming up the forward pass -[2023-09-20 15:14:56,840][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-20 15:15:01,883][inference][INFO] - + Forward pass latency: 3.16e-03 (s) -[2023-09-20 15:15:01,884][inference][INFO] - + Forward pass throughput: 316.00 (samples/s) -[2023-09-20 15:15:01,884][inference][INFO] - + Tracking forward pass peak memory -[2023-09-20 15:15:01,923][inference][INFO] - + Forward pass peak memory: 551 (MB) -[2023-09-20 15:15:01,923][inference][INFO] - Saving inference results -[2023-09-20 15:15:01,929][backend][INFO] - Cleaning pytorch backend -[2023-09-20 15:15:01,929][backend][INFO] - + Deleting pretrained model diff --git a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 8159f39871a74c9f18130d7533a9047d427de8e1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,73 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: null - disable_grad: true - eval_mode: true - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 5 - warmup_runs: 10 - memory: true - energy: false - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: null - can_diffuse: false - can_generate: false - forward_kwargs: {} - generate_kwargs: {} -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git 
a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 066b870023e40ea752871bed1ce294b853e410b4..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.latency(s),forward.throughput(samples/s),forward.peak_memory(MB) -0,0.00316,316.0,551 diff --git a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index cf6b2100f521049a9838a290716b3948d38db210..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,73 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 5 - warmup_runs: 10 - memory: true - energy: false - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: null - can_diffuse: ${can_diffuse:${task}} - can_generate: ${can_generate:${task}} - forward_kwargs: {} - generate_kwargs: {} -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 415e402786d96fa8096eae6c2dcdea7f016e6e42..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: experiment - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/pytorch_bert_inference/1/experiment.log b/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/pytorch_bert_inference/1/experiment.log deleted file mode 100644 index 7491277164acdabf30794b16a00c107a938967fc..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/pytorch_bert_inference/1/experiment.log +++ /dev/null @@ -1,18 +0,0 @@ -[2023-09-20 15:15:03,126][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-20 15:15:03,126][backend][INFO] - Configuring pytorch backend -[2023-09-20 15:15:03,127][pytorch][INFO] - + Disabling gradients -[2023-09-20 15:15:03,127][pytorch][INFO] - + Loading model on device: cpu -[2023-09-20 15:15:03,327][pytorch][INFO] - + Turning on model's eval mode -[2023-09-20 15:15:03,327][benchmark][INFO] - Configuring inference benchmark -[2023-09-20 15:15:03,327][inference][INFO] - Running inference benchmark -[2023-09-20 15:15:03,327][input_generator][INFO] - Using bert model type generator -[2023-09-20 15:15:03,328][inference][INFO] - + Preparing input for the forward pass -[2023-09-20 15:15:03,328][inference][INFO] - + Warming up the forward pass -[2023-09-20 15:15:03,369][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-20 15:15:08,404][inference][INFO] - + Forward pass latency: 3.68e-03 (s) -[2023-09-20 15:15:08,405][inference][INFO] - + Forward pass throughput: 1090.00 (samples/s) -[2023-09-20 15:15:08,405][inference][INFO] - + Tracking forward pass peak memory -[2023-09-20 15:15:08,437][inference][INFO] - + Forward pass peak memory: 553 (MB) -[2023-09-20 15:15:08,438][inference][INFO] - Saving inference results -[2023-09-20 15:15:08,442][backend][INFO] - Cleaning pytorch backend -[2023-09-20 15:15:08,442][backend][INFO] - + Deleting pretrained model diff --git a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 00d5f274372d051cb01e74f2a4b26b9ac2511e68..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,73 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: null - disable_grad: true - eval_mode: true - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - 
quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 5 - warmup_runs: 10 - memory: true - energy: false - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: null - can_diffuse: false - can_generate: false - forward_kwargs: {} - generate_kwargs: {} -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 21c08b8e085af5cf6a1b81971fe1df25b3583283..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.latency(s),forward.throughput(samples/s),forward.peak_memory(MB) -0,0.00368,1090.0,553 diff --git a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index aa8ba5cebbb00543dea83d42e1a3242be877425e..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,73 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 5 - warmup_runs: 10 - memory: true - energy: false - input_shapes: - batch_size: 2 - sequence_length: 16 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: null - can_diffuse: ${can_diffuse:${task}} - can_generate: ${can_generate:${task}} - forward_kwargs: {} - generate_kwargs: {} -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 
-device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 2ff34f4c3c386440e2c8c10c231420f61bde8fb4..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: experiment - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/pytorch_gpt2_inference/0/experiment.log b/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/pytorch_gpt2_inference/0/experiment.log deleted file mode 100644 index d60a60d794ba37aaf01fe39b0c4c49786fa5f707..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/pytorch_gpt2_inference/0/experiment.log +++ /dev/null @@ -1,25 +0,0 @@ -[2023-09-20 15:15:16,298][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-20 15:15:16,299][backend][INFO] - Configuring pytorch backend -[2023-09-20 15:15:16,301][pytorch][INFO] 
- + Disabling gradients -[2023-09-20 15:15:16,301][pytorch][INFO] - + Loading model on device: cpu -[2023-09-20 15:15:17,730][pytorch][INFO] - + Turning on model's eval mode -[2023-09-20 15:15:17,734][benchmark][INFO] - Configuring inference benchmark -[2023-09-20 15:15:17,734][inference][INFO] - Running inference benchmark -[2023-09-20 15:15:17,734][input_generator][INFO] - Using gpt2 model type generator -[2023-09-20 15:15:17,735][inference][INFO] - + Preparing input for the forward pass -[2023-09-20 15:15:17,735][inference][INFO] - + Warming up the forward pass -[2023-09-20 15:15:17,932][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-20 15:15:22,973][inference][INFO] - + Forward pass latency: 3.24e-03 (s) -[2023-09-20 15:15:22,975][inference][INFO] - + Forward pass throughput: 617.00 (samples/s) -[2023-09-20 15:15:22,975][inference][INFO] - + Tracking forward pass peak memory -[2023-09-20 15:15:23,025][inference][INFO] - + Forward pass peak memory: 555 (MB) -[2023-09-20 15:15:23,026][inference][INFO] - + Preparing input for the generation pass -[2023-09-20 15:15:23,027][inference][INFO] - + Warming up the generation pass -[2023-09-20 15:15:23,330][inference][INFO] - + Tracking generation latency and throughput -[2023-09-20 15:15:28,550][inference][INFO] - + Generation pass latency: 3.07e-01 (s) -[2023-09-20 15:15:28,550][inference][INFO] - + Generation pass throughput: 651.00 (tokens/s) -[2023-09-20 15:15:28,550][inference][INFO] - + Tracking generation pass peak memory -[2023-09-20 15:15:28,901][inference][INFO] - + Generation pass peak memory: 558 (MB) -[2023-09-20 15:15:28,902][inference][INFO] - Saving inference results -[2023-09-20 15:15:28,908][backend][INFO] - Cleaning pytorch backend -[2023-09-20 15:15:28,908][backend][INFO] - + Deleting pretrained model diff --git a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index f46fa028eedfeac3a6b4c27f9b2b7fe54502d316..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,79 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: null - disable_grad: true - eval_mode: true - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 5 - warmup_runs: 10 - memory: true - energy: false - input_shapes: - batch_size: 2 - sequence_length: 16 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: null - can_diffuse: false - can_generate: true - forward_kwargs: {} - generate_kwargs: - max_new_tokens: 100 - min_new_tokens: 100 - do_sample: false - use_cache: true - pad_token_id: 0 - num_beams: 1 -experiment_name: pytorch_gpt2_inference -model: 
hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index c56ae3d524e2b936b27595c41f9170fd18146286..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:18:49_f29fe7458953dbf00addaf793d95ea1965bc8441/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.latency(s),forward.throughput(samples/s),forward.peak_memory(MB),generate.latency(s),generate.throughput(tokens/s),generate.peak_memory(MB) -0,0.00324,617.0,555,0.307,651.0,558 diff --git a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/0/.config/config.yaml b/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/0/.config/config.yaml deleted file mode 100644 index 833d05497b59ba26a18c6a302d0bb91fe2b458ce..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/0/.config/config.yaml +++ /dev/null @@ -1,75 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float16 - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: training - _target_: optimum_benchmark.benchmarks.training.benchmark.TrainingBenchmark - warmup_steps: 40 - dataset_shapes: - dataset_size: 2000 - sequence_length: 273 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - training_arguments: - skip_memory_metrics: true - output_dir: ./trainer_output - use_cpu: ${is_cpu:${device}} - ddp_find_unused_parameters: false - do_train: true - do_eval: false - do_predict: false - report_to: none - per_device_train_batch_size: 16 -experiment_name: bert_1gpu_training -model: bert-base-uncased -device: cuda -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - 
Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/0/.config/hydra.yaml b/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/0/.config/hydra.yaml deleted file mode 100644 index 3be0732992ae413f9e4584647d8a85e7f3a3098c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/0/.config/hydra.yaml +++ /dev/null @@ -1,174 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - +benchmark.training_arguments.per_device_train_batch_size: 16,32 - backend.torch_dtype: float16,float32 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - +benchmark.training_arguments.per_device_train_batch_size=16 - - backend.torch_dtype=float16 - job: - name: experiment - chdir: true - override_dirname: +benchmark.training_arguments.per_device_train_batch_size=16,backend.torch_dtype=float16 - id: '0' - num: 0 - config_name: bert_1gpu_training - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/0 - choices: - benchmark: training - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/0/.config/overrides.yaml b/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/0/.config/overrides.yaml deleted file mode 100644 index 8d6d8f16f84a50b1a79c5e37697d3e62129d7306..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/0/.config/overrides.yaml +++ /dev/null @@ -1,2 +0,0 @@ -- +benchmark.training_arguments.per_device_train_batch_size=16 -- backend.torch_dtype=float16 diff --git a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/0/experiment.log b/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/0/experiment.log deleted file mode 100644 index 1fb47f7fd8e45d59bc066c653bd96edb5e4df5db..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/0/experiment.log +++ /dev/null @@ -1,17 +0,0 @@ 
-[2023-09-20 15:18:23,417][experiment][WARNING] - Multiple GPUs detected but CUDA_DEVICE_ORDER is not set. This means that code might allocate resources from the wrong GPUs even if CUDA_VISIBLE_DEVICES is set. Pytorch uses the `FASTEST_FIRST` order by default, which is not guaranteed to be the same as nvidia-smi. `CUDA_DEVICE_ORDER` will be set to `PCI_BUS_ID` to ensure that the GPUs are allocated in the same order as nvidia-smi. -[2023-09-20 15:18:27,313][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-20 15:18:27,313][backend][INFO] - Configuring pytorch backend -[2023-09-20 15:18:27,315][backend][INFO] - + Checking initial device(s) isolation of CUDA device(s): [0] -[2023-09-20 15:18:27,446][backend][INFO] - + Checking continuous device(s) isolation of CUDA device(s): [0] -[2023-09-20 15:18:27,459][pytorch][INFO] - + Loading model on device: cuda -[2023-09-20 15:18:36,164][benchmark][INFO] - Configuring training benchmark -[2023-09-20 15:18:36,164][training][INFO] - Running training benchmark -[2023-09-20 15:18:36,165][dataset_generator][INFO] - Using text-classification task generator -[2023-09-20 15:18:36,224][pytorch][INFO] - + Setting dataset format to `torch`. -[2023-09-20 15:18:36,224][pytorch][INFO] - + Wrapping training arguments with transformers.TrainingArguments -[2023-09-20 15:18:36,226][pytorch][INFO] - + Wrapping model with transformers.Trainer -[2023-09-20 15:18:36,230][pytorch][INFO] - + Starting training -[2023-09-20 15:19:11,732][pytorch][INFO] - + Training finished successfully -[2023-09-20 15:19:11,733][training][INFO] - Saving training results -[2023-09-20 15:19:11,736][backend][INFO] - Cleaning pytorch backend -[2023-09-20 15:19:11,736][backend][INFO] - + Deleting pretrained model diff --git a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/0/hydra_config.yaml b/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/0/hydra_config.yaml deleted file mode 100644 index a75f0da9b0aa0151767b6e6858f2f15bc4de54c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/0/hydra_config.yaml +++ /dev/null @@ -1,75 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float16 - disable_grad: false - eval_mode: false - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: training - _target_: optimum_benchmark.benchmarks.training.benchmark.TrainingBenchmark - warmup_steps: 40 - dataset_shapes: - dataset_size: 2000 - sequence_length: 273 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - training_arguments: - skip_memory_metrics: true - output_dir: ./trainer_output - use_cpu: false - ddp_find_unused_parameters: false - do_train: true - do_eval: false - do_predict: false - report_to: none - per_device_train_batch_size: 16 -experiment_name: bert_1gpu_training -model: bert-base-uncased -device: cuda -task: 
text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/0/training_results.csv b/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/0/training_results.csv deleted file mode 100644 index dc630f4b81e3e9bf5614edf1f97520f205c30d4e..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/0/training_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,warmup.runtime(s),warmup.throughput(samples/s),training.runtime(s),training.throughput(samples/s),overall_training.runtime(s),overall_training.throughput(samples/s) -0,4.299026012420654,148.87092986898094,31.091834545135498,172.39252937033925,35.39086174964905,151.4515254789791 diff --git a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/1/.config/config.yaml b/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/1/.config/config.yaml deleted file mode 100644 index 8d5c88f6241de713f22a4fbaee0c5bb7f23f5dfa..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/1/.config/config.yaml +++ /dev/null @@ -1,75 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float32 - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: training - _target_: optimum_benchmark.benchmarks.training.benchmark.TrainingBenchmark - warmup_steps: 40 - dataset_shapes: - dataset_size: 2000 - sequence_length: 273 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - training_arguments: - skip_memory_metrics: true - output_dir: ./trainer_output - use_cpu: ${is_cpu:${device}} - ddp_find_unused_parameters: false - do_train: true - do_eval: false - do_predict: false - report_to: none - per_device_train_batch_size: 16 -experiment_name: bert_1gpu_training -model: bert-base-uncased -device: cuda -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - 
Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/1/.config/hydra.yaml b/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/1/.config/hydra.yaml deleted file mode 100644 index 5e052bcc9c8bba515985a6d906110190a3f762f1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/1/.config/hydra.yaml +++ /dev/null @@ -1,174 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - +benchmark.training_arguments.per_device_train_batch_size: 16,32 - backend.torch_dtype: float16,float32 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - +benchmark.training_arguments.per_device_train_batch_size=16 - - backend.torch_dtype=float32 - job: - name: experiment - chdir: true - override_dirname: +benchmark.training_arguments.per_device_train_batch_size=16,backend.torch_dtype=float32 - id: '1' - num: 1 - config_name: bert_1gpu_training - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/1 - choices: - benchmark: training - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/1/.config/overrides.yaml b/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/1/.config/overrides.yaml deleted file mode 100644 index ec1fe39ea16e445c5d7092a8b3e071d9b0c55522..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/1/.config/overrides.yaml +++ /dev/null @@ -1,2 +0,0 @@ -- +benchmark.training_arguments.per_device_train_batch_size=16 -- backend.torch_dtype=float32 diff --git a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/1/experiment.log b/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/1/experiment.log deleted file mode 100644 index 051e3f9dac4e53a6f8fdc77e2e41eb5a6619a6c6..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/1/experiment.log +++ /dev/null @@ -1,16 +0,0 @@ 
-[2023-09-20 15:19:13,539][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-20 15:19:13,539][backend][INFO] - Configuring pytorch backend -[2023-09-20 15:19:13,540][backend][INFO] - + Checking initial device(s) isolation of CUDA device(s): [0] -[2023-09-20 15:19:13,657][backend][INFO] - + Checking continuous device(s) isolation of CUDA device(s): [0] -[2023-09-20 15:19:13,672][pytorch][INFO] - + Loading model on device: cuda -[2023-09-20 15:19:14,162][benchmark][INFO] - Configuring training benchmark -[2023-09-20 15:19:14,163][training][INFO] - Running training benchmark -[2023-09-20 15:19:14,163][dataset_generator][INFO] - Using text-classification task generator -[2023-09-20 15:19:14,208][pytorch][INFO] - + Setting dataset format to `torch`. -[2023-09-20 15:19:14,208][pytorch][INFO] - + Wrapping training arguments with transformers.TrainingArguments -[2023-09-20 15:19:14,209][pytorch][INFO] - + Wrapping model with transformers.Trainer -[2023-09-20 15:19:14,214][pytorch][INFO] - + Starting training -[2023-09-20 15:20:28,198][pytorch][INFO] - + Training finished successfully -[2023-09-20 15:20:28,199][training][INFO] - Saving training results -[2023-09-20 15:20:28,201][backend][INFO] - Cleaning pytorch backend -[2023-09-20 15:20:28,201][backend][INFO] - + Deleting pretrained model diff --git a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/1/hydra_config.yaml b/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/1/hydra_config.yaml deleted file mode 100644 index abcce5eda3bfbb21561ba578be643fe874f4ee2c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/1/hydra_config.yaml +++ /dev/null @@ -1,75 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float32 - disable_grad: false - eval_mode: false - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: training - _target_: optimum_benchmark.benchmarks.training.benchmark.TrainingBenchmark - warmup_steps: 40 - dataset_shapes: - dataset_size: 2000 - sequence_length: 273 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - training_arguments: - skip_memory_metrics: true - output_dir: ./trainer_output - use_cpu: false - ddp_find_unused_parameters: false - do_train: true - do_eval: false - do_predict: false - report_to: none - per_device_train_batch_size: 16 -experiment_name: bert_1gpu_training -model: bert-base-uncased -device: cuda -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct 
MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/1/training_results.csv b/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/1/training_results.csv deleted file mode 100644 index a608ca05ada8a0e332a295417deaae12fcc26869..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/1/training_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,warmup.runtime(s),warmup.throughput(samples/s),training.runtime(s),training.throughput(samples/s),overall_training.runtime(s),overall_training.throughput(samples/s) -0,8.022400379180908,79.77662167807989,65.8548059463501,81.39117446290295,73.87720775604248,72.55282329700124 diff --git a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/2/.config/config.yaml b/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/2/.config/config.yaml deleted file mode 100644 index e7ef28c956bde8dedcb111288d0157eca31b1c62..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/2/.config/config.yaml +++ /dev/null @@ -1,75 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float16 - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: training - _target_: optimum_benchmark.benchmarks.training.benchmark.TrainingBenchmark - warmup_steps: 40 - dataset_shapes: - dataset_size: 2000 - sequence_length: 273 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - training_arguments: - skip_memory_metrics: true - output_dir: ./trainer_output - use_cpu: ${is_cpu:${device}} - ddp_find_unused_parameters: false - do_train: true - do_eval: false - do_predict: false - report_to: none - per_device_train_batch_size: 32 -experiment_name: bert_1gpu_training -model: bert-base-uncased -device: cuda -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/2/.config/hydra.yaml b/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/2/.config/hydra.yaml deleted file mode 100644 index 
db632c39e47e1f8725eebfa3e77e5092f2bfecec..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/2/.config/hydra.yaml +++ /dev/null @@ -1,174 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - +benchmark.training_arguments.per_device_train_batch_size: 16,32 - backend.torch_dtype: float16,float32 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - +benchmark.training_arguments.per_device_train_batch_size=32 - - backend.torch_dtype=float16 - job: - name: experiment - chdir: true - override_dirname: +benchmark.training_arguments.per_device_train_batch_size=32,backend.torch_dtype=float16 - id: '2' - num: 2 - config_name: bert_1gpu_training - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: 
command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/2 - choices: - benchmark: training - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/2/.config/overrides.yaml b/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/2/.config/overrides.yaml deleted file mode 100644 index d5e7aac495a5978edf9851ed4029a4f4f4de707b..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/2/.config/overrides.yaml +++ /dev/null @@ -1,2 +0,0 @@ -- +benchmark.training_arguments.per_device_train_batch_size=32 -- backend.torch_dtype=float16 diff --git a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/2/experiment.log b/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/2/experiment.log deleted file mode 100644 index 9f6bde89e6c84b450a06eccad57370326be1d179..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/2/experiment.log +++ /dev/null @@ -1,16 +0,0 @@ -[2023-09-20 15:20:30,033][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-20 15:20:30,033][backend][INFO] - Configuring pytorch backend -[2023-09-20 15:20:30,034][backend][INFO] - + Checking initial device(s) isolation of CUDA device(s): [0] -[2023-09-20 15:20:30,154][backend][INFO] - + Checking continuous device(s) isolation of CUDA device(s): [0] -[2023-09-20 15:20:30,171][pytorch][INFO] - + Loading model on device: cuda -[2023-09-20 15:20:30,639][benchmark][INFO] - Configuring training benchmark -[2023-09-20 15:20:30,639][training][INFO] - Running training benchmark -[2023-09-20 15:20:30,640][dataset_generator][INFO] - Using text-classification task generator -[2023-09-20 15:20:30,679][pytorch][INFO] - + Setting dataset format to `torch`. 
-[2023-09-20 15:20:30,679][pytorch][INFO] - + Wrapping training arguments with transformers.TrainingArguments -[2023-09-20 15:20:30,680][pytorch][INFO] - + Wrapping model with transformers.Trainer -[2023-09-20 15:20:30,685][pytorch][INFO] - + Starting training -[2023-09-20 15:21:15,450][pytorch][INFO] - + Training finished successfully -[2023-09-20 15:21:15,451][training][INFO] - Saving training results -[2023-09-20 15:21:15,454][backend][INFO] - Cleaning pytorch backend -[2023-09-20 15:21:15,454][backend][INFO] - + Deleting pretrained model diff --git a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/2/hydra_config.yaml b/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/2/hydra_config.yaml deleted file mode 100644 index af5e494ca3514c36d5e090f1af921bd6e849a818..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/2/hydra_config.yaml +++ /dev/null @@ -1,75 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float16 - disable_grad: false - eval_mode: false - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: training - _target_: optimum_benchmark.benchmarks.training.benchmark.TrainingBenchmark - warmup_steps: 40 - dataset_shapes: - dataset_size: 2000 - sequence_length: 273 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - training_arguments: - skip_memory_metrics: true - output_dir: ./trainer_output - use_cpu: false - ddp_find_unused_parameters: false - do_train: true - do_eval: false - do_predict: false - report_to: none - per_device_train_batch_size: 32 -experiment_name: bert_1gpu_training -model: bert-base-uncased -device: cuda -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/2/training_results.csv b/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/2/training_results.csv deleted file mode 100644 index fd9864bbeeccca9f905752a9b8910fbb32b0aa4c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/2/training_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,warmup.runtime(s),warmup.throughput(samples/s),training.runtime(s),training.throughput(samples/s),overall_training.runtime(s),overall_training.throughput(samples/s) 
-0,9.80002760887146,130.61187693402843,34.85708165168762,136.78712542962285,44.65711045265198,106.76911138384796 diff --git a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/3/.config/config.yaml b/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/3/.config/config.yaml deleted file mode 100644 index bf1190e8785c39d372b5122d20df36f210ecf85f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/3/.config/config.yaml +++ /dev/null @@ -1,75 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float32 - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: training - _target_: optimum_benchmark.benchmarks.training.benchmark.TrainingBenchmark - warmup_steps: 40 - dataset_shapes: - dataset_size: 2000 - sequence_length: 273 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - training_arguments: - skip_memory_metrics: true - output_dir: ./trainer_output - use_cpu: ${is_cpu:${device}} - ddp_find_unused_parameters: false - do_train: true - do_eval: false - do_predict: false - report_to: none - per_device_train_batch_size: 32 -experiment_name: bert_1gpu_training -model: bert-base-uncased -device: cuda -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/3/.config/hydra.yaml b/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/3/.config/hydra.yaml deleted file mode 100644 index 19fd72fb4c45eab20516fc37ff7c87afbe80a18b..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/3/.config/hydra.yaml +++ /dev/null @@ -1,174 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - +benchmark.training_arguments.per_device_train_batch_size: 16,32 - backend.torch_dtype: float16,float32 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered 
by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - +benchmark.training_arguments.per_device_train_batch_size=32 - - backend.torch_dtype=float32 - job: - name: experiment - chdir: true - override_dirname: +benchmark.training_arguments.per_device_train_batch_size=32,backend.torch_dtype=float32 - id: '3' - num: 3 - config_name: bert_1gpu_training - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/3 - choices: - benchmark: training - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/3/.config/overrides.yaml b/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/3/.config/overrides.yaml deleted file mode 100644 index 
f92a32ab90ab68f9e88427057fcb5ef3cbec936b..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/3/.config/overrides.yaml +++ /dev/null @@ -1,2 +0,0 @@ -- +benchmark.training_arguments.per_device_train_batch_size=32 -- backend.torch_dtype=float32 diff --git a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/3/experiment.log b/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/3/experiment.log deleted file mode 100644 index 6e88a1fa8c50748fe35778ae6daf937357006c37..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/3/experiment.log +++ /dev/null @@ -1,16 +0,0 @@ -[2023-09-20 15:21:17,167][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-20 15:21:17,168][backend][INFO] - Configuring pytorch backend -[2023-09-20 15:21:17,168][backend][INFO] - + Checking initial device(s) isolation of CUDA device(s): [0] -[2023-09-20 15:21:17,289][backend][INFO] - + Checking continuous device(s) isolation of CUDA device(s): [0] -[2023-09-20 15:21:17,306][pytorch][INFO] - + Loading model on device: cuda -[2023-09-20 15:21:17,782][benchmark][INFO] - Configuring training benchmark -[2023-09-20 15:21:17,782][training][INFO] - Running training benchmark -[2023-09-20 15:21:17,782][dataset_generator][INFO] - Using text-classification task generator -[2023-09-20 15:21:17,821][pytorch][INFO] - + Setting dataset format to `torch`. -[2023-09-20 15:21:17,822][pytorch][INFO] - + Wrapping training arguments with transformers.TrainingArguments -[2023-09-20 15:21:17,822][pytorch][INFO] - + Wrapping model with transformers.Trainer -[2023-09-20 15:21:17,827][pytorch][INFO] - + Starting training -[2023-09-20 15:22:47,250][pytorch][INFO] - + Training finished successfully -[2023-09-20 15:22:47,251][training][INFO] - Saving training results -[2023-09-20 15:22:47,253][backend][INFO] - Cleaning pytorch backend -[2023-09-20 15:22:47,253][backend][INFO] - + Deleting pretrained model diff --git a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/3/hydra_config.yaml b/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/3/hydra_config.yaml deleted file mode 100644 index f53a28377945009d7294ad9edd5f274d64fe05f6..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/3/hydra_config.yaml +++ /dev/null @@ -1,75 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float32 - disable_grad: false - eval_mode: false - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: training - _target_: optimum_benchmark.benchmarks.training.benchmark.TrainingBenchmark - warmup_steps: 40 - dataset_shapes: - dataset_size: 2000 - sequence_length: 273 - num_choices: 1 - 
feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - training_arguments: - skip_memory_metrics: true - output_dir: ./trainer_output - use_cpu: false - ddp_find_unused_parameters: false - do_train: true - do_eval: false - do_predict: false - report_to: none - per_device_train_batch_size: 32 -experiment_name: bert_1gpu_training -model: bert-base-uncased -device: cuda -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/3/training_results.csv b/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/3/training_results.csv deleted file mode 100644 index 04606f0b568aaa5825acd44b25e29b9922f495a9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/bert_1gpu_training/3/training_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,warmup.runtime(s),warmup.throughput(samples/s),training.runtime(s),training.throughput(samples/s),overall_training.runtime(s),overall_training.throughput(samples/s) -0,18.946444749832153,67.55884900312707,70.27028131484985,67.85229702776789,89.21672701835632,53.44289304649099 diff --git a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/0/.config/config.yaml b/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/0/.config/config.yaml deleted file mode 100644 index 31586216610ff3c8e9b7efe6d31ed5af08de26e0..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/0/.config/config.yaml +++ /dev/null @@ -1,73 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float16 - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 15 - warmup_runs: 10 - memory: false - energy: false - input_shapes: - batch_size: 1 - sequence_length: 200 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 - can_diffuse: ${can_diffuse:${task}} - can_generate: ${can_generate:${task}} - forward_kwargs: {} - generate_kwargs: {} -experiment_name: llama_1gpu_inference -model: fxmarty/tiny-llama-fast-tokenizer -device: cuda -task: ${infer_task:${model}} -hub_kwargs: - revision: main - cache_dir: null - 
force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/0/.config/hydra.yaml b/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/0/.config/hydra.yaml deleted file mode 100644 index d37567d60ddd19c900ca4322d3995935148f69ab..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,174 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,16 - backend.torch_dtype: float16,float32 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - - backend.torch_dtype=float16 - job: - name: experiment - chdir: true - override_dirname: backend.torch_dtype=float16,benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: llama2_1gpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/0/.config/overrides.yaml b/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/0/.config/overrides.yaml deleted file mode 100644 index b7fc5900f179157ac0449016dd8d9497ffd58db4..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/0/.config/overrides.yaml +++ /dev/null @@ -1,2 +0,0 @@ -- benchmark.input_shapes.batch_size=1 -- backend.torch_dtype=float16 diff --git a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/0/experiment.log b/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/0/experiment.log deleted file mode 100644 index a2ff47363d191ef5884df6ed3d0405ed0cdec7e4..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/0/experiment.log +++ /dev/null @@ -1,27 +0,0 @@ -[2023-09-20 15:23:31,916][inference][INFO] - `new_tokens` was 
set to 200. `max_new_tokens` and `min_new_tokens` will be set to 200. -[2023-09-20 15:23:32,068][experiment][WARNING] - Multiple GPUs detected but CUDA_DEVICE_ORDER is not set. This means that code might allocate resources from the wrong GPUs even if CUDA_VISIBLE_DEVICES is set. Pytorch uses the `FASTEST_FIRST` order by default, which is not guaranteed to be the same as nvidia-smi. `CUDA_DEVICE_ORDER` will be set to `PCI_BUS_ID` to ensure that the GPUs are allocated in the same order as nvidia-smi. -[2023-09-20 15:23:36,862][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type llama -[2023-09-20 15:23:36,862][backend][INFO] - Configuring pytorch backend -[2023-09-20 15:23:36,863][backend][INFO] - + Checking initial device(s) isolation of CUDA device(s): [0] -[2023-09-20 15:23:36,989][backend][INFO] - + Checking continuous device(s) isolation of CUDA device(s): [0] -[2023-09-20 15:23:37,004][pytorch][INFO] - + Disabling gradients -[2023-09-20 15:23:37,006][pytorch][INFO] - + Loading model on device: cuda -[2023-09-20 15:23:38,414][pytorch][INFO] - + Turning on model's eval mode -[2023-09-20 15:23:38,418][benchmark][INFO] - Configuring inference benchmark -[2023-09-20 15:23:38,418][inference][INFO] - Running inference benchmark -[2023-09-20 15:23:38,418][input_generator][INFO] - Using llama model type generator -[2023-09-20 15:23:38,439][inference][INFO] - + Preparing input for the forward pass -[2023-09-20 15:23:38,439][inference][INFO] - + Warming up the forward pass -[2023-09-20 15:23:38,759][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-20 15:23:38,759][latency_tracker][INFO] - Tracked Pytorch devices: [0] -[2023-09-20 15:23:54,289][inference][INFO] - + Forward pass latency: 3.19e-03 (s) -[2023-09-20 15:23:54,292][inference][INFO] - + Forward pass throughput: 313.00 (samples/s) -[2023-09-20 15:23:54,292][inference][INFO] - + Preparing input for the generation pass -[2023-09-20 15:23:54,292][inference][INFO] - + Warming up the generation pass -[2023-09-20 15:23:55,535][inference][INFO] - + Tracking generation latency and throughput -[2023-09-20 15:23:55,536][latency_tracker][INFO] - Tracked Pytorch devices: [0] -[2023-09-20 15:24:11,133][inference][INFO] - + Generation pass latency: 5.20e-01 (s) -[2023-09-20 15:24:11,133][inference][INFO] - + Generation pass throughput: 385.00 (tokens/s) -[2023-09-20 15:24:11,133][inference][INFO] - Saving inference results -[2023-09-20 15:24:11,142][backend][INFO] - Cleaning pytorch backend -[2023-09-20 15:24:11,142][backend][INFO] - + Deleting pretrained model diff --git a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/0/hydra_config.yaml b/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/0/hydra_config.yaml deleted file mode 100644 index 82eb70a083058b7ed98a1ae91483b49ea13d5d6d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/0/hydra_config.yaml +++ /dev/null @@ -1,79 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float16 - disable_grad: true - eval_mode: true - amp_autocast: false - amp_dtype: null - 
torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 15 - warmup_runs: 10 - memory: false - energy: false - input_shapes: - batch_size: 1 - sequence_length: 200 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 - can_diffuse: false - can_generate: true - forward_kwargs: {} - generate_kwargs: - max_new_tokens: 200 - min_new_tokens: 200 - do_sample: false - use_cache: true - pad_token_id: 0 - num_beams: 1 -experiment_name: llama_1gpu_inference -model: fxmarty/tiny-llama-fast-tokenizer -device: cuda -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/0/inference_results.csv b/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/0/inference_results.csv deleted file mode 100644 index f4cbc8b3168411ac6e99eacc6b66259aeddac673..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,0.00319,313.0,0.52,385.0 diff --git a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/1/.config/config.yaml b/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/1/.config/config.yaml deleted file mode 100644 index 43b47967479b8340e846fdf11ba343f34adbfe5d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/1/.config/config.yaml +++ /dev/null @@ -1,73 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float32 - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 15 - warmup_runs: 10 - memory: false - energy: false - input_shapes: - batch_size: 1 - sequence_length: 200 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 - 
can_diffuse: ${can_diffuse:${task}} - can_generate: ${can_generate:${task}} - forward_kwargs: {} - generate_kwargs: {} -experiment_name: llama_1gpu_inference -model: fxmarty/tiny-llama-fast-tokenizer -device: cuda -task: ${infer_task:${model}} -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/1/.config/hydra.yaml b/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/1/.config/hydra.yaml deleted file mode 100644 index 47000cb71b9ecf431aa854b341033d8b899a2e30..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,174 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,16 - backend.torch_dtype: float16,float32 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - - backend.torch_dtype=float32 - job: - name: experiment - chdir: true - override_dirname: backend.torch_dtype=float32,benchmark.input_shapes.batch_size=1 - id: '1' - num: 1 - config_name: llama2_1gpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/1/.config/overrides.yaml b/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/1/.config/overrides.yaml deleted file mode 100644 index 8b4741e9eb919dcf02db7f865f28d92490b262b0..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/1/.config/overrides.yaml +++ /dev/null @@ -1,2 +0,0 @@ -- benchmark.input_shapes.batch_size=1 -- backend.torch_dtype=float32 diff --git a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/1/experiment.log b/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/1/experiment.log deleted file mode 100644 index 146b921a816aeb6dcbcc1dd6731f706563d695de..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/1/experiment.log +++ /dev/null @@ -1,41 +0,0 @@ -[2023-09-20 15:11:01,951][inference][INFO] - `new_tokens` was 
set to 200. `max_new_tokens` and `min_new_tokens` will be set to 200. -[2023-09-20 15:11:03,121][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type llama -[2023-09-20 15:11:03,121][backend][INFO] - Configuring pytorch backend -[2023-09-20 15:11:03,122][backend][INFO] - + Checking initial device(s) isolation of CUDA device(s): [0] -[2023-09-20 15:11:03,244][backend][INFO] - + Checking continuous device(s) isolation of CUDA device(s): [0] -[2023-09-20 15:11:03,261][pytorch][INFO] - + Disabling gradients -[2023-09-20 15:11:03,262][pytorch][INFO] - + Loading model on device: cuda -[2023-09-20 15:11:03,635][pytorch][INFO] - + Turning on model's eval mode -[2023-09-20 15:11:03,636][benchmark][INFO] - Configuring inference benchmark -[2023-09-20 15:11:03,636][inference][INFO] - Running inference benchmark -[2023-09-20 15:11:03,636][input_generator][INFO] - Using llama model type generator -[2023-09-20 15:11:03,637][inference][INFO] - + Preparing input for the forward pass -[2023-09-20 15:11:03,637][inference][INFO] - + Warming up the forward pass -[2023-09-20 15:11:03,771][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-20 15:11:03,772][latency_tracker][INFO] - Tracked Pytorch devices: [0] -[2023-09-20 15:24:11,804][inference][INFO] - `new_tokens` was set to 200. `max_new_tokens` and `min_new_tokens` will be set to 200. -[2023-09-20 15:24:12,776][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type llama -[2023-09-20 15:24:12,777][backend][INFO] - Configuring pytorch backend -[2023-09-20 15:24:12,777][backend][INFO] - + Checking initial device(s) isolation of CUDA device(s): [0] -[2023-09-20 15:24:12,901][backend][INFO] - + Checking continuous device(s) isolation of CUDA device(s): [0] -[2023-09-20 15:24:12,917][pytorch][INFO] - + Disabling gradients -[2023-09-20 15:24:12,918][pytorch][INFO] - + Loading model on device: cuda -[2023-09-20 15:24:13,286][pytorch][INFO] - + Turning on model's eval mode -[2023-09-20 15:24:13,287][benchmark][INFO] - Configuring inference benchmark -[2023-09-20 15:24:13,287][inference][INFO] - Running inference benchmark -[2023-09-20 15:24:13,287][input_generator][INFO] - Using llama model type generator -[2023-09-20 15:24:13,288][inference][INFO] - + Preparing input for the forward pass -[2023-09-20 15:24:13,288][inference][INFO] - + Warming up the forward pass -[2023-09-20 15:24:13,418][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-20 15:24:13,418][latency_tracker][INFO] - Tracked Pytorch devices: [0] -[2023-09-20 15:24:29,101][inference][INFO] - + Forward pass latency: 2.29e-03 (s) -[2023-09-20 15:24:29,104][inference][INFO] - + Forward pass throughput: 437.00 (samples/s) -[2023-09-20 15:24:29,105][inference][INFO] - + Preparing input for the generation pass -[2023-09-20 15:24:29,105][inference][INFO] - + Warming up the generation pass -[2023-09-20 15:24:29,516][inference][INFO] - + Tracking generation latency and throughput -[2023-09-20 15:24:29,516][latency_tracker][INFO] - Tracked Pytorch devices: [0] -[2023-09-20 15:24:44,964][inference][INFO] - + Generation pass latency: 4.83e-01 (s) -[2023-09-20 15:24:44,965][inference][INFO] - + Generation pass throughput: 414.00 (tokens/s) -[2023-09-20 15:24:44,965][inference][INFO] - Saving inference results -[2023-09-20 15:24:44,974][backend][INFO] - Cleaning pytorch backend -[2023-09-20 15:24:44,974][backend][INFO] - + Deleting pretrained model diff --git 
a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/1/hydra_config.yaml b/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/1/hydra_config.yaml deleted file mode 100644 index 74699b4dde858de23b60cd15505dc3745c7fa948..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/1/hydra_config.yaml +++ /dev/null @@ -1,79 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float32 - disable_grad: true - eval_mode: true - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 15 - warmup_runs: 10 - memory: false - energy: false - input_shapes: - batch_size: 1 - sequence_length: 200 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 - can_diffuse: false - can_generate: true - forward_kwargs: {} - generate_kwargs: - max_new_tokens: 200 - min_new_tokens: 200 - do_sample: false - use_cache: true - pad_token_id: 0 - num_beams: 1 -experiment_name: llama_1gpu_inference -model: fxmarty/tiny-llama-fast-tokenizer -device: cuda -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/1/inference_results.csv b/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/1/inference_results.csv deleted file mode 100644 index 5f819b20698617be2d484c18dbe0739fcf450dd5..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,0.00229,437.0,0.483,414.0 diff --git a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/2/.config/config.yaml b/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/2/.config/config.yaml deleted file mode 100644 index 4977442d8bcc23fcf97d0e094e48173757e090dd..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/2/.config/config.yaml +++ /dev/null @@ -1,73 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: 
optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float16 - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 15 - warmup_runs: 10 - memory: false - energy: false - input_shapes: - batch_size: 16 - sequence_length: 200 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 - can_diffuse: ${can_diffuse:${task}} - can_generate: ${can_generate:${task}} - forward_kwargs: {} - generate_kwargs: {} -experiment_name: llama_1gpu_inference -model: fxmarty/tiny-llama-fast-tokenizer -device: cuda -task: ${infer_task:${model}} -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/2/.config/hydra.yaml b/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/2/.config/hydra.yaml deleted file mode 100644 index 14763a7124d3fb86a2a335e96cff2621d3b7885c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/2/.config/hydra.yaml +++ /dev/null @@ -1,174 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,16 - backend.torch_dtype: float16,float32 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. 
- - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=16 - - backend.torch_dtype=float16 - job: - name: experiment - chdir: true - override_dirname: backend.torch_dtype=float16,benchmark.input_shapes.batch_size=16 - id: '2' - num: 2 - config_name: llama2_1gpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/2 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/2/.config/overrides.yaml b/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/2/.config/overrides.yaml deleted file mode 100644 index 67f6580c5dd56716f1c23e4e90698198799b499f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/2/.config/overrides.yaml +++ /dev/null @@ -1,2 +0,0 @@ -- benchmark.input_shapes.batch_size=16 -- backend.torch_dtype=float16 diff --git a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/2/experiment.log b/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/2/experiment.log deleted file mode 100644 index 39ba55b5727ae537986b31c50f97d0425f1ad0b1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/2/experiment.log +++ /dev/null @@ -1,26 +0,0 @@ -[2023-09-20 
15:24:45,642][inference][INFO] - `new_tokens` was set to 200. `max_new_tokens` and `min_new_tokens` will be set to 200. -[2023-09-20 15:24:46,602][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type llama -[2023-09-20 15:24:46,602][backend][INFO] - Configuring pytorch backend -[2023-09-20 15:24:46,603][backend][INFO] - + Checking initial device(s) isolation of CUDA device(s): [0] -[2023-09-20 15:24:46,722][backend][INFO] - + Checking continuous device(s) isolation of CUDA device(s): [0] -[2023-09-20 15:24:46,741][pytorch][INFO] - + Disabling gradients -[2023-09-20 15:24:46,742][pytorch][INFO] - + Loading model on device: cuda -[2023-09-20 15:24:47,108][pytorch][INFO] - + Turning on model's eval mode -[2023-09-20 15:24:47,108][benchmark][INFO] - Configuring inference benchmark -[2023-09-20 15:24:47,109][inference][INFO] - Running inference benchmark -[2023-09-20 15:24:47,109][input_generator][INFO] - Using llama model type generator -[2023-09-20 15:24:47,110][inference][INFO] - + Preparing input for the forward pass -[2023-09-20 15:24:47,110][inference][INFO] - + Warming up the forward pass -[2023-09-20 15:24:47,152][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-20 15:24:47,152][latency_tracker][INFO] - Tracked Pytorch devices: [0] -[2023-09-20 15:25:02,492][inference][INFO] - + Forward pass latency: 3.98e-03 (s) -[2023-09-20 15:25:02,494][inference][INFO] - + Forward pass throughput: 4020.00 (samples/s) -[2023-09-20 15:25:02,494][inference][INFO] - + Preparing input for the generation pass -[2023-09-20 15:25:02,495][inference][INFO] - + Warming up the generation pass -[2023-09-20 15:25:04,079][inference][INFO] - + Tracking generation latency and throughput -[2023-09-20 15:25:04,079][latency_tracker][INFO] - Tracked Pytorch devices: [0] -[2023-09-20 15:25:19,290][inference][INFO] - + Generation pass latency: 5.43e-01 (s) -[2023-09-20 15:25:19,290][inference][INFO] - + Generation pass throughput: 5890.00 (tokens/s) -[2023-09-20 15:25:19,290][inference][INFO] - Saving inference results -[2023-09-20 15:25:19,296][backend][INFO] - Cleaning pytorch backend -[2023-09-20 15:25:19,296][backend][INFO] - + Deleting pretrained model diff --git a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/2/hydra_config.yaml b/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/2/hydra_config.yaml deleted file mode 100644 index 46e7c942b1f98367f69538013206b4b487b0db82..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/2/hydra_config.yaml +++ /dev/null @@ -1,79 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float16 - disable_grad: true - eval_mode: true - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 15 - warmup_runs: 10 - memory: false - energy: false - 
input_shapes: - batch_size: 16 - sequence_length: 200 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 - can_diffuse: false - can_generate: true - forward_kwargs: {} - generate_kwargs: - max_new_tokens: 200 - min_new_tokens: 200 - do_sample: false - use_cache: true - pad_token_id: 0 - num_beams: 1 -experiment_name: llama_1gpu_inference -model: fxmarty/tiny-llama-fast-tokenizer -device: cuda -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/2/inference_results.csv b/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/2/inference_results.csv deleted file mode 100644 index 6b58e5c39b48a14997040c82ad6d9e3565cf8eb5..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/2/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,0.00398,4020.0,0.543,5890.0 diff --git a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/3/.config/config.yaml b/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/3/.config/config.yaml deleted file mode 100644 index 75a2cec740a09f8fed297594d9998531f5ae4881..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/3/.config/config.yaml +++ /dev/null @@ -1,73 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float32 - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 15 - warmup_runs: 10 - memory: false - energy: false - input_shapes: - batch_size: 16 - sequence_length: 200 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 - can_diffuse: ${can_diffuse:${task}} - can_generate: ${can_generate:${task}} - forward_kwargs: {} - generate_kwargs: {} -experiment_name: llama_1gpu_inference -model: fxmarty/tiny-llama-fast-tokenizer -device: cuda -task: ${infer_task:${model}} -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 
1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/3/.config/hydra.yaml b/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/3/.config/hydra.yaml deleted file mode 100644 index 107a5eda1fe3cecc9c7077b757fd36ebe8ae1bb7..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/3/.config/hydra.yaml +++ /dev/null @@ -1,174 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,16 - backend.torch_dtype: float16,float32 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=16 - - backend.torch_dtype=float32 - job: - name: experiment - chdir: true - override_dirname: backend.torch_dtype=float32,benchmark.input_shapes.batch_size=16 - id: '3' - num: 3 - config_name: llama2_1gpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/3 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/3/.config/overrides.yaml b/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/3/.config/overrides.yaml deleted file mode 100644 index 9d69bb0ed87b5cb93e4c4e5d5b820de170746b79..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/3/.config/overrides.yaml +++ /dev/null @@ -1,2 +0,0 @@ -- benchmark.input_shapes.batch_size=16 -- backend.torch_dtype=float32 diff --git a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/3/experiment.log b/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/3/experiment.log deleted file mode 100644 index 88e352da9afcd2c23335f60e6419df60fd624fc7..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/3/experiment.log +++ /dev/null @@ -1,26 +0,0 @@ -[2023-09-20 15:25:19,941][inference][INFO] - `new_tokens` 
was set to 200. `max_new_tokens` and `min_new_tokens` will be set to 200. -[2023-09-20 15:25:20,983][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type llama -[2023-09-20 15:25:20,983][backend][INFO] - Configuring pytorch backend -[2023-09-20 15:25:20,984][backend][INFO] - + Checking initial device(s) isolation of CUDA device(s): [0] -[2023-09-20 15:25:21,102][backend][INFO] - + Checking continuous device(s) isolation of CUDA device(s): [0] -[2023-09-20 15:25:21,122][pytorch][INFO] - + Disabling gradients -[2023-09-20 15:25:21,123][pytorch][INFO] - + Loading model on device: cuda -[2023-09-20 15:25:21,500][pytorch][INFO] - + Turning on model's eval mode -[2023-09-20 15:25:21,501][benchmark][INFO] - Configuring inference benchmark -[2023-09-20 15:25:21,502][inference][INFO] - Running inference benchmark -[2023-09-20 15:25:21,502][input_generator][INFO] - Using llama model type generator -[2023-09-20 15:25:21,502][inference][INFO] - + Preparing input for the forward pass -[2023-09-20 15:25:21,503][inference][INFO] - + Warming up the forward pass -[2023-09-20 15:25:21,525][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-20 15:25:21,525][latency_tracker][INFO] - Tracked Pytorch devices: [0] -[2023-09-20 15:25:36,794][inference][INFO] - + Forward pass latency: 5.31e-03 (s) -[2023-09-20 15:25:36,796][inference][INFO] - + Forward pass throughput: 3010.00 (samples/s) -[2023-09-20 15:25:36,797][inference][INFO] - + Preparing input for the generation pass -[2023-09-20 15:25:36,797][inference][INFO] - + Warming up the generation pass -[2023-09-20 15:25:37,825][inference][INFO] - + Tracking generation latency and throughput -[2023-09-20 15:25:37,825][latency_tracker][INFO] - Tracked Pytorch devices: [0] -[2023-09-20 15:25:53,643][inference][INFO] - + Generation pass latency: 8.79e-01 (s) -[2023-09-20 15:25:53,643][inference][INFO] - + Generation pass throughput: 3640.00 (tokens/s) -[2023-09-20 15:25:53,643][inference][INFO] - Saving inference results -[2023-09-20 15:25:53,649][backend][INFO] - Cleaning pytorch backend -[2023-09-20 15:25:53,649][backend][INFO] - + Deleting pretrained model diff --git a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/3/hydra_config.yaml b/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/3/hydra_config.yaml deleted file mode 100644 index 2404fd246618d0489ff3ba123302854d0830756a..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/3/hydra_config.yaml +++ /dev/null @@ -1,79 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float32 - disable_grad: true - eval_mode: true - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 15 - warmup_runs: 10 - memory: false - energy: false - input_shapes: - batch_size: 16 - sequence_length: 
200 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 - can_diffuse: false - can_generate: true - forward_kwargs: {} - generate_kwargs: - max_new_tokens: 200 - min_new_tokens: 200 - do_sample: false - use_cache: true - pad_token_id: 0 - num_beams: 1 -experiment_name: llama_1gpu_inference -model: fxmarty/tiny-llama-fast-tokenizer -device: cuda -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/3/inference_results.csv b/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/3/inference_results.csv deleted file mode 100644 index 54e0d6c626f7542c38df215df192560d3d2472e4..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/llama_1gpu_inference/3/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,0.00531,3010.0,0.879,3640.0 diff --git a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index b0c83089a2621f9950400d418a85b4376221df44..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,73 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 5 - warmup_runs: 10 - memory: true - energy: false - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: null - can_diffuse: ${can_diffuse:${task}} - can_generate: ${can_generate:${task}} - forward_kwargs: {} - generate_kwargs: {} -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - 
accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index bd406386af895718ab4dfbf9f0a82e39071f8654..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: experiment - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/pytorch_bert_inference/0/experiment.log b/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/pytorch_bert_inference/0/experiment.log deleted file mode 100644 index 1ae01545977a029ecb8447c69866eba2ea49b8b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/pytorch_bert_inference/0/experiment.log +++ /dev/null @@ -1,18 +0,0 @@ -[2023-09-20 15:22:55,295][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and 
model_type bert -[2023-09-20 15:22:55,295][backend][INFO] - Configuring pytorch backend -[2023-09-20 15:22:55,298][pytorch][INFO] - + Disabling gradients -[2023-09-20 15:22:55,298][pytorch][INFO] - + Loading model on device: cpu -[2023-09-20 15:22:56,420][pytorch][INFO] - + Turning on model's eval mode -[2023-09-20 15:22:56,428][benchmark][INFO] - Configuring inference benchmark -[2023-09-20 15:22:56,428][inference][INFO] - Running inference benchmark -[2023-09-20 15:22:56,428][input_generator][INFO] - Using bert model type generator -[2023-09-20 15:22:56,428][inference][INFO] - + Preparing input for the forward pass -[2023-09-20 15:22:56,429][inference][INFO] - + Warming up the forward pass -[2023-09-20 15:22:56,710][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-20 15:23:01,748][inference][INFO] - + Forward pass latency: 3.22e-03 (s) -[2023-09-20 15:23:01,750][inference][INFO] - + Forward pass throughput: 311.00 (samples/s) -[2023-09-20 15:23:01,750][inference][INFO] - + Tracking forward pass peak memory -[2023-09-20 15:23:01,779][inference][INFO] - + Forward pass peak memory: 551 (MB) -[2023-09-20 15:23:01,782][inference][INFO] - Saving inference results -[2023-09-20 15:23:01,789][backend][INFO] - Cleaning pytorch backend -[2023-09-20 15:23:01,789][backend][INFO] - + Deleting pretrained model diff --git a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 8159f39871a74c9f18130d7533a9047d427de8e1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,73 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: null - disable_grad: true - eval_mode: true - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 5 - warmup_runs: 10 - memory: true - energy: false - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: null - can_diffuse: false - can_generate: false - forward_kwargs: {} - generate_kwargs: {} -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git 
a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 49ce9b37d154c41d0da7f4d202b36e846329f655..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.latency(s),forward.throughput(samples/s),forward.peak_memory(MB) -0,0.00322,311.0,551 diff --git a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index cf6b2100f521049a9838a290716b3948d38db210..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,73 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 5 - warmup_runs: 10 - memory: true - energy: false - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: null - can_diffuse: ${can_diffuse:${task}} - can_generate: ${can_generate:${task}} - forward_kwargs: {} - generate_kwargs: {} -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index b3c00216b5e9a8da94bbe2e79e81d995937df795..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: experiment - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/pytorch_bert_inference/1/experiment.log b/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/pytorch_bert_inference/1/experiment.log deleted file mode 100644 index 83b572b9ac5b676fc67b932c31051553b277fd66..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/pytorch_bert_inference/1/experiment.log +++ /dev/null @@ -1,18 +0,0 @@ -[2023-09-20 15:23:02,979][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-20 15:23:02,979][backend][INFO] - Configuring pytorch backend -[2023-09-20 15:23:02,980][pytorch][INFO] - + Disabling gradients -[2023-09-20 15:23:02,980][pytorch][INFO] - + Loading model on device: cpu -[2023-09-20 15:23:03,183][pytorch][INFO] - + Turning on model's eval mode -[2023-09-20 15:23:03,183][benchmark][INFO] - Configuring inference benchmark -[2023-09-20 15:23:03,184][inference][INFO] - Running inference benchmark -[2023-09-20 15:23:03,184][input_generator][INFO] - Using bert model type generator -[2023-09-20 15:23:03,184][inference][INFO] - + Preparing input for the forward pass -[2023-09-20 15:23:03,184][inference][INFO] - + Warming up the forward pass -[2023-09-20 15:23:03,221][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-20 15:23:08,256][inference][INFO] - + Forward pass latency: 3.70e-03 (s) -[2023-09-20 15:23:08,257][inference][INFO] - + Forward pass throughput: 1080.00 (samples/s) -[2023-09-20 15:23:08,257][inference][INFO] - + Tracking forward pass peak memory -[2023-09-20 15:23:08,302][inference][INFO] - + Forward pass peak memory: 554 (MB) -[2023-09-20 15:23:08,302][inference][INFO] - Saving inference results -[2023-09-20 15:23:08,306][backend][INFO] - Cleaning pytorch backend -[2023-09-20 15:23:08,306][backend][INFO] - + Deleting pretrained model diff --git a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 00d5f274372d051cb01e74f2a4b26b9ac2511e68..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,73 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: null - disable_grad: true - eval_mode: true - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - 
quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 5 - warmup_runs: 10 - memory: true - energy: false - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: null - can_diffuse: false - can_generate: false - forward_kwargs: {} - generate_kwargs: {} -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 7ddead1f9c9bee03bb5d02666976fdc7440f750e..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.latency(s),forward.throughput(samples/s),forward.peak_memory(MB) -0,0.0037,1080.0,554 diff --git a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index aa8ba5cebbb00543dea83d42e1a3242be877425e..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,73 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 5 - warmup_runs: 10 - memory: true - energy: false - input_shapes: - batch_size: 2 - sequence_length: 16 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: null - can_diffuse: ${can_diffuse:${task}} - can_generate: ${can_generate:${task}} - forward_kwargs: {} - generate_kwargs: {} -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 
-device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 6cb015abb10ef940fd5d8d9dca227124003eba04..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: experiment - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/pytorch_gpt2_inference/0/experiment.log b/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/pytorch_gpt2_inference/0/experiment.log deleted file mode 100644 index 984a7d8c31fa39e7803d42a27066f34a21513bdf..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/pytorch_gpt2_inference/0/experiment.log +++ /dev/null @@ -1,25 +0,0 @@ -[2023-09-20 15:23:15,600][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-20 15:23:15,601][backend][INFO] - Configuring pytorch backend -[2023-09-20 15:23:15,603][pytorch][INFO] 
- + Disabling gradients -[2023-09-20 15:23:15,603][pytorch][INFO] - + Loading model on device: cpu -[2023-09-20 15:23:17,042][pytorch][INFO] - + Turning on model's eval mode -[2023-09-20 15:23:17,046][benchmark][INFO] - Configuring inference benchmark -[2023-09-20 15:23:17,046][inference][INFO] - Running inference benchmark -[2023-09-20 15:23:17,046][input_generator][INFO] - Using gpt2 model type generator -[2023-09-20 15:23:17,046][inference][INFO] - + Preparing input for the forward pass -[2023-09-20 15:23:17,046][inference][INFO] - + Warming up the forward pass -[2023-09-20 15:23:17,275][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-20 15:23:22,316][inference][INFO] - + Forward pass latency: 3.24e-03 (s) -[2023-09-20 15:23:22,317][inference][INFO] - + Forward pass throughput: 617.00 (samples/s) -[2023-09-20 15:23:22,317][inference][INFO] - + Tracking forward pass peak memory -[2023-09-20 15:23:22,362][inference][INFO] - + Forward pass peak memory: 554 (MB) -[2023-09-20 15:23:22,363][inference][INFO] - + Preparing input for the generation pass -[2023-09-20 15:23:22,363][inference][INFO] - + Warming up the generation pass -[2023-09-20 15:23:22,666][inference][INFO] - + Tracking generation latency and throughput -[2023-09-20 15:23:27,762][inference][INFO] - + Generation pass latency: 3.00e-01 (s) -[2023-09-20 15:23:27,763][inference][INFO] - + Generation pass throughput: 667.00 (tokens/s) -[2023-09-20 15:23:27,763][inference][INFO] - + Tracking generation pass peak memory -[2023-09-20 15:23:28,133][inference][INFO] - + Generation pass peak memory: 560 (MB) -[2023-09-20 15:23:28,134][inference][INFO] - Saving inference results -[2023-09-20 15:23:28,140][backend][INFO] - Cleaning pytorch backend -[2023-09-20 15:23:28,141][backend][INFO] - + Deleting pretrained model diff --git a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index f46fa028eedfeac3a6b4c27f9b2b7fe54502d316..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,79 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: null - disable_grad: true - eval_mode: true - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 5 - warmup_runs: 10 - memory: true - energy: false - input_shapes: - batch_size: 2 - sequence_length: 16 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: null - can_diffuse: false - can_generate: true - forward_kwargs: {} - generate_kwargs: - max_new_tokens: 100 - min_new_tokens: 100 - do_sample: false - use_cache: true - pad_token_id: 0 - num_beams: 1 -experiment_name: pytorch_gpt2_inference -model: 
hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 728c2b917eea893acc669bbfc89b00114eb86bb2..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:40:14_245532065d3ceddf1c0f8cb3e60ab6451861100a/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.latency(s),forward.throughput(samples/s),forward.peak_memory(MB),generate.latency(s),generate.throughput(tokens/s),generate.peak_memory(MB) -0,0.00324,617.0,554,0.3,667.0,560 diff --git a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/0/.config/config.yaml b/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/0/.config/config.yaml deleted file mode 100644 index 833d05497b59ba26a18c6a302d0bb91fe2b458ce..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/0/.config/config.yaml +++ /dev/null @@ -1,75 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float16 - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: training - _target_: optimum_benchmark.benchmarks.training.benchmark.TrainingBenchmark - warmup_steps: 40 - dataset_shapes: - dataset_size: 2000 - sequence_length: 273 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - training_arguments: - skip_memory_metrics: true - output_dir: ./trainer_output - use_cpu: ${is_cpu:${device}} - ddp_find_unused_parameters: false - do_train: true - do_eval: false - do_predict: false - report_to: none - per_device_train_batch_size: 16 -experiment_name: bert_1gpu_training -model: bert-base-uncased -device: cuda -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct 
MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/0/.config/hydra.yaml b/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/0/.config/hydra.yaml deleted file mode 100644 index 216a7fb5f756f5596cf4392de718ef93f72aab98..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/0/.config/hydra.yaml +++ /dev/null @@ -1,174 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - +benchmark.training_arguments.per_device_train_batch_size: 16,32 - backend.torch_dtype: float16,float32 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - +benchmark.training_arguments.per_device_train_batch_size=16 - - backend.torch_dtype=float16 - job: - name: experiment - chdir: true - override_dirname: +benchmark.training_arguments.per_device_train_batch_size=16,backend.torch_dtype=float16 - id: '0' - num: 0 - config_name: bert_1gpu_training - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/0 - choices: - benchmark: training - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/0/.config/overrides.yaml b/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/0/.config/overrides.yaml deleted file mode 100644 index 8d6d8f16f84a50b1a79c5e37697d3e62129d7306..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/0/.config/overrides.yaml +++ /dev/null @@ -1,2 +0,0 @@ -- +benchmark.training_arguments.per_device_train_batch_size=16 -- backend.torch_dtype=float16 diff --git a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/0/experiment.log b/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/0/experiment.log deleted file mode 100644 index c45739e0984489db4edcc9222078d5862d46d74d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/0/experiment.log +++ /dev/null @@ -1,17 +0,0 @@ 
-[2023-09-20 15:26:23,922][experiment][WARNING] - Multiple GPUs detected but CUDA_DEVICE_ORDER is not set. This means that code might allocate resources from the wrong GPUs even if CUDA_VISIBLE_DEVICES is set. Pytorch uses the `FASTEST_FIRST` order by default, which is not guaranteed to be the same as nvidia-smi. `CUDA_DEVICE_ORDER` will be set to `PCI_BUS_ID` to ensure that the GPUs are allocated in the same order as nvidia-smi. -[2023-09-20 15:26:28,070][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-20 15:26:28,071][backend][INFO] - Configuring pytorch backend -[2023-09-20 15:26:28,073][backend][INFO] - + Checking initial device(s) isolation of CUDA device(s): [0] -[2023-09-20 15:26:28,202][backend][INFO] - + Checking continuous device(s) isolation of CUDA device(s): [0] -[2023-09-20 15:26:28,215][pytorch][INFO] - + Loading model on device: cuda -[2023-09-20 15:26:36,920][benchmark][INFO] - Configuring training benchmark -[2023-09-20 15:26:36,921][training][INFO] - Running training benchmark -[2023-09-20 15:26:36,921][dataset_generator][INFO] - Using text-classification task generator -[2023-09-20 15:26:36,979][pytorch][INFO] - + Setting dataset format to `torch`. -[2023-09-20 15:26:36,979][pytorch][INFO] - + Wrapping training arguments with transformers.TrainingArguments -[2023-09-20 15:26:36,981][pytorch][INFO] - + Wrapping model with transformers.Trainer -[2023-09-20 15:26:36,985][pytorch][INFO] - + Starting training -[2023-09-20 15:27:12,532][pytorch][INFO] - + Training finished successfully -[2023-09-20 15:27:12,533][training][INFO] - Saving training results -[2023-09-20 15:27:12,536][backend][INFO] - Cleaning pytorch backend -[2023-09-20 15:27:12,536][backend][INFO] - + Deleting pretrained model diff --git a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/0/hydra_config.yaml b/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/0/hydra_config.yaml deleted file mode 100644 index a75f0da9b0aa0151767b6e6858f2f15bc4de54c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/0/hydra_config.yaml +++ /dev/null @@ -1,75 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float16 - disable_grad: false - eval_mode: false - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: training - _target_: optimum_benchmark.benchmarks.training.benchmark.TrainingBenchmark - warmup_steps: 40 - dataset_shapes: - dataset_size: 2000 - sequence_length: 273 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - training_arguments: - skip_memory_metrics: true - output_dir: ./trainer_output - use_cpu: false - ddp_find_unused_parameters: false - do_train: true - do_eval: false - do_predict: false - report_to: none - per_device_train_batch_size: 16 -experiment_name: bert_1gpu_training -model: bert-base-uncased -device: cuda -task: 
text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/0/training_results.csv b/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/0/training_results.csv deleted file mode 100644 index 4e9cd87e0a2fd4da680cfbc59c4c14a6a62b6ba0..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/0/training_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,warmup.runtime(s),warmup.throughput(samples/s),training.runtime(s),training.throughput(samples/s),overall_training.runtime(s),overall_training.throughput(samles/s) -0,4.364237308502197,146.64647102328345,31.070497274398804,172.51091775787205,35.434736013412476,151.26400258692982 diff --git a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/1/.config/config.yaml b/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/1/.config/config.yaml deleted file mode 100644 index 8d5c88f6241de713f22a4fbaee0c5bb7f23f5dfa..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/1/.config/config.yaml +++ /dev/null @@ -1,75 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float32 - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: training - _target_: optimum_benchmark.benchmarks.training.benchmark.TrainingBenchmark - warmup_steps: 40 - dataset_shapes: - dataset_size: 2000 - sequence_length: 273 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - training_arguments: - skip_memory_metrics: true - output_dir: ./trainer_output - use_cpu: ${is_cpu:${device}} - ddp_find_unused_parameters: false - do_train: true - do_eval: false - do_predict: false - report_to: none - per_device_train_batch_size: 16 -experiment_name: bert_1gpu_training -model: bert-base-uncased -device: cuda -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - 
Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/1/.config/hydra.yaml b/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/1/.config/hydra.yaml deleted file mode 100644 index 43115c9b8fdaee972178bc9243e2bcbd44b7995e..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/1/.config/hydra.yaml +++ /dev/null @@ -1,174 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - +benchmark.training_arguments.per_device_train_batch_size: 16,32 - backend.torch_dtype: float16,float32 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - +benchmark.training_arguments.per_device_train_batch_size=16 - - backend.torch_dtype=float32 - job: - name: experiment - chdir: true - override_dirname: +benchmark.training_arguments.per_device_train_batch_size=16,backend.torch_dtype=float32 - id: '1' - num: 1 - config_name: bert_1gpu_training - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/1 - choices: - benchmark: training - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/1/.config/overrides.yaml b/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/1/.config/overrides.yaml deleted file mode 100644 index ec1fe39ea16e445c5d7092a8b3e071d9b0c55522..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/1/.config/overrides.yaml +++ /dev/null @@ -1,2 +0,0 @@ -- +benchmark.training_arguments.per_device_train_batch_size=16 -- backend.torch_dtype=float32 diff --git a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/1/experiment.log b/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/1/experiment.log deleted file mode 100644 index a1842dfea6de7c2421ccc43bc3a65ff1d8154671..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/1/experiment.log +++ /dev/null @@ -1,16 +0,0 @@ 
-[2023-09-20 15:27:14,094][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-20 15:27:14,094][backend][INFO] - Configuring pytorch backend -[2023-09-20 15:27:14,094][backend][INFO] - + Checking initial device(s) isolation of CUDA device(s): [0] -[2023-09-20 15:27:14,213][backend][INFO] - + Checking continuous device(s) isolation of CUDA device(s): [0] -[2023-09-20 15:27:14,228][pytorch][INFO] - + Loading model on device: cuda -[2023-09-20 15:27:14,700][benchmark][INFO] - Configuring training benchmark -[2023-09-20 15:27:14,701][training][INFO] - Running training benchmark -[2023-09-20 15:27:14,701][dataset_generator][INFO] - Using text-classification task generator -[2023-09-20 15:27:14,745][pytorch][INFO] - + Setting dataset format to `torch`. -[2023-09-20 15:27:14,745][pytorch][INFO] - + Wrapping training arguments with transformers.TrainingArguments -[2023-09-20 15:27:14,746][pytorch][INFO] - + Wrapping model with transformers.Trainer -[2023-09-20 15:27:14,751][pytorch][INFO] - + Starting training -[2023-09-20 15:28:20,568][pytorch][INFO] - + Training finished successfully -[2023-09-20 15:28:20,569][training][INFO] - Saving training results -[2023-09-20 15:28:20,571][backend][INFO] - Cleaning pytorch backend -[2023-09-20 15:28:20,571][backend][INFO] - + Deleting pretrained model diff --git a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/1/hydra_config.yaml b/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/1/hydra_config.yaml deleted file mode 100644 index abcce5eda3bfbb21561ba578be643fe874f4ee2c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/1/hydra_config.yaml +++ /dev/null @@ -1,75 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float32 - disable_grad: false - eval_mode: false - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: training - _target_: optimum_benchmark.benchmarks.training.benchmark.TrainingBenchmark - warmup_steps: 40 - dataset_shapes: - dataset_size: 2000 - sequence_length: 273 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - training_arguments: - skip_memory_metrics: true - output_dir: ./trainer_output - use_cpu: false - ddp_find_unused_parameters: false - do_train: true - do_eval: false - do_predict: false - report_to: none - per_device_train_batch_size: 16 -experiment_name: bert_1gpu_training -model: bert-base-uncased -device: cuda -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct 
MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/1/training_results.csv b/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/1/training_results.csv deleted file mode 100644 index 3edb57efe55d17c057b14af5cb633b981bcac274..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/1/training_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,warmup.runtime(s),warmup.throughput(samples/s),training.runtime(s),training.throughput(samples/s),overall_training.runtime(s),overall_training.throughput(samples/s) -0,7.095812559127808,90.19403974767148,58.61403751373291,91.44567116271737,65.70985412597656,81.57071829324109 diff --git a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/2/.config/config.yaml b/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/2/.config/config.yaml deleted file mode 100644 index e7ef28c956bde8dedcb111288d0157eca31b1c62..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/2/.config/config.yaml +++ /dev/null @@ -1,75 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float16 - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: training - _target_: optimum_benchmark.benchmarks.training.benchmark.TrainingBenchmark - warmup_steps: 40 - dataset_shapes: - dataset_size: 2000 - sequence_length: 273 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - training_arguments: - skip_memory_metrics: true - output_dir: ./trainer_output - use_cpu: ${is_cpu:${device}} - ddp_find_unused_parameters: false - do_train: true - do_eval: false - do_predict: false - report_to: none - per_device_train_batch_size: 32 -experiment_name: bert_1gpu_training -model: bert-base-uncased -device: cuda -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/2/.config/hydra.yaml b/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/2/.config/hydra.yaml deleted file mode 100644 index
5dbf8022cf46c447970297e556a4657ef5a21c06..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/2/.config/hydra.yaml +++ /dev/null @@ -1,174 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - +benchmark.training_arguments.per_device_train_batch_size: 16,32 - backend.torch_dtype: float16,float32 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - +benchmark.training_arguments.per_device_train_batch_size=32 - - backend.torch_dtype=float16 - job: - name: experiment - chdir: true - override_dirname: +benchmark.training_arguments.per_device_train_batch_size=32,backend.torch_dtype=float16 - id: '2' - num: 2 - config_name: bert_1gpu_training - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: 
command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/2 - choices: - benchmark: training - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/2/.config/overrides.yaml b/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/2/.config/overrides.yaml deleted file mode 100644 index d5e7aac495a5978edf9851ed4029a4f4f4de707b..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/2/.config/overrides.yaml +++ /dev/null @@ -1,2 +0,0 @@ -- +benchmark.training_arguments.per_device_train_batch_size=32 -- backend.torch_dtype=float16 diff --git a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/2/experiment.log b/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/2/experiment.log deleted file mode 100644 index eaab6304074e335ac2dd87c753fcbb45cc53efc9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/2/experiment.log +++ /dev/null @@ -1,16 +0,0 @@ -[2023-09-20 15:28:22,166][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-20 15:28:22,167][backend][INFO] - Configuring pytorch backend -[2023-09-20 15:28:22,167][backend][INFO] - + Checking initial device(s) isolation of CUDA device(s): [0] -[2023-09-20 15:28:22,297][backend][INFO] - + Checking continuous device(s) isolation of CUDA device(s): [0] -[2023-09-20 15:28:22,315][pytorch][INFO] - + Loading model on device: cuda -[2023-09-20 15:28:22,786][benchmark][INFO] - Configuring training benchmark -[2023-09-20 15:28:22,786][training][INFO] - Running training benchmark -[2023-09-20 15:28:22,787][dataset_generator][INFO] - Using text-classification task generator -[2023-09-20 15:28:22,826][pytorch][INFO] - + Setting dataset format to `torch`. 
-[2023-09-20 15:28:22,826][pytorch][INFO] - + Wrapping training arguments with transformers.TrainingArguments -[2023-09-20 15:28:22,827][pytorch][INFO] - + Wrapping model with transformers.Trainer -[2023-09-20 15:28:22,832][pytorch][INFO] - + Starting training -[2023-09-20 15:28:58,800][pytorch][INFO] - + Training finished successfully -[2023-09-20 15:28:58,801][training][INFO] - Saving training results -[2023-09-20 15:28:58,803][backend][INFO] - Cleaning pytorch backend -[2023-09-20 15:28:58,803][backend][INFO] - + Deleting pretrained model diff --git a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/2/hydra_config.yaml b/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/2/hydra_config.yaml deleted file mode 100644 index af5e494ca3514c36d5e090f1af921bd6e849a818..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/2/hydra_config.yaml +++ /dev/null @@ -1,75 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float16 - disable_grad: false - eval_mode: false - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: training - _target_: optimum_benchmark.benchmarks.training.benchmark.TrainingBenchmark - warmup_steps: 40 - dataset_shapes: - dataset_size: 2000 - sequence_length: 273 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - training_arguments: - skip_memory_metrics: true - output_dir: ./trainer_output - use_cpu: false - ddp_find_unused_parameters: false - do_train: true - do_eval: false - do_predict: false - report_to: none - per_device_train_batch_size: 32 -experiment_name: bert_1gpu_training -model: bert-base-uncased -device: cuda -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/2/training_results.csv b/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/2/training_results.csv deleted file mode 100644 index 63de0c637cdea2fd9e87f2587d491f322f793a85..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/2/training_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,warmup.runtime(s),warmup.throughput(samples/s),training.runtime(s),training.throughput(samples/s),overall_training.runtime(s),overall_training.throughput(samples/s)
-0,8.268535375595093,154.80371575575165,27.590984582901,172.81007082852994,35.8595232963562,132.9632845533251 diff --git a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/3/.config/config.yaml b/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/3/.config/config.yaml deleted file mode 100644 index bf1190e8785c39d372b5122d20df36f210ecf85f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/3/.config/config.yaml +++ /dev/null @@ -1,75 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float32 - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: training - _target_: optimum_benchmark.benchmarks.training.benchmark.TrainingBenchmark - warmup_steps: 40 - dataset_shapes: - dataset_size: 2000 - sequence_length: 273 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - training_arguments: - skip_memory_metrics: true - output_dir: ./trainer_output - use_cpu: ${is_cpu:${device}} - ddp_find_unused_parameters: false - do_train: true - do_eval: false - do_predict: false - report_to: none - per_device_train_batch_size: 32 -experiment_name: bert_1gpu_training -model: bert-base-uncased -device: cuda -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/3/.config/hydra.yaml b/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/3/.config/hydra.yaml deleted file mode 100644 index 2c50b470faacfa06c068a3e2f8076a1360c1974c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/3/.config/hydra.yaml +++ /dev/null @@ -1,174 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - +benchmark.training_arguments.per_device_train_batch_size: 16,32 - backend.torch_dtype: float16,float32 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by 
Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - +benchmark.training_arguments.per_device_train_batch_size=32 - - backend.torch_dtype=float32 - job: - name: experiment - chdir: true - override_dirname: +benchmark.training_arguments.per_device_train_batch_size=32,backend.torch_dtype=float32 - id: '3' - num: 3 - config_name: bert_1gpu_training - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/3 - choices: - benchmark: training - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/3/.config/overrides.yaml b/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/3/.config/overrides.yaml deleted file mode 100644 index 
f92a32ab90ab68f9e88427057fcb5ef3cbec936b..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/3/.config/overrides.yaml +++ /dev/null @@ -1,2 +0,0 @@ -- +benchmark.training_arguments.per_device_train_batch_size=32 -- backend.torch_dtype=float32 diff --git a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/3/experiment.log b/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/3/experiment.log deleted file mode 100644 index 5d60e57318aec75ce0b2a92f31c7a20a23af45af..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/3/experiment.log +++ /dev/null @@ -1,16 +0,0 @@ -[2023-09-20 15:29:00,524][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-20 15:29:00,524][backend][INFO] - Configuring pytorch backend -[2023-09-20 15:29:00,524][backend][INFO] - + Checking initial device(s) isolation of CUDA device(s): [0] -[2023-09-20 15:29:00,646][backend][INFO] - + Checking continuous device(s) isolation of CUDA device(s): [0] -[2023-09-20 15:29:00,664][pytorch][INFO] - + Loading model on device: cuda -[2023-09-20 15:29:01,147][benchmark][INFO] - Configuring training benchmark -[2023-09-20 15:29:01,147][training][INFO] - Running training benchmark -[2023-09-20 15:29:01,147][dataset_generator][INFO] - Using text-classification task generator -[2023-09-20 15:29:01,184][pytorch][INFO] - + Setting dataset format to `torch`. -[2023-09-20 15:29:01,185][pytorch][INFO] - + Wrapping training arguments with transformers.TrainingArguments -[2023-09-20 15:29:01,186][pytorch][INFO] - + Wrapping model with transformers.Trainer -[2023-09-20 15:29:01,191][pytorch][INFO] - + Starting training -[2023-09-20 15:30:17,669][pytorch][INFO] - + Training finished successfully -[2023-09-20 15:30:17,669][training][INFO] - Saving training results -[2023-09-20 15:30:17,671][backend][INFO] - Cleaning pytorch backend -[2023-09-20 15:30:17,671][backend][INFO] - + Deleting pretrained model diff --git a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/3/hydra_config.yaml b/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/3/hydra_config.yaml deleted file mode 100644 index f53a28377945009d7294ad9edd5f274d64fe05f6..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/3/hydra_config.yaml +++ /dev/null @@ -1,75 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float32 - disable_grad: false - eval_mode: false - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: training - _target_: optimum_benchmark.benchmarks.training.benchmark.TrainingBenchmark - warmup_steps: 40 - dataset_shapes: - dataset_size: 2000 - sequence_length: 273 - num_choices: 1 - 
feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - training_arguments: - skip_memory_metrics: true - output_dir: ./trainer_output - use_cpu: false - ddp_find_unused_parameters: false - do_train: true - do_eval: false - do_predict: false - report_to: none - per_device_train_batch_size: 32 -experiment_name: bert_1gpu_training -model: bert-base-uncased -device: cuda -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/3/training_results.csv b/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/3/training_results.csv deleted file mode 100644 index a0309a4f26d49a7bcfd6e1756498161c746fa472..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/bert_1gpu_training/3/training_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,warmup.runtime(s),warmup.throughput(samples/s),training.runtime(s),training.throughput(samples/s),overall_training.runtime(s),overall_training.throughput(samples/s) -0,16.510013103485107,77.52870890997683,59.860506772994995,79.65184822242432,76.37052369117737,62.43246437958915 diff --git a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/0/.config/config.yaml b/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/0/.config/config.yaml deleted file mode 100644 index 31586216610ff3c8e9b7efe6d31ed5af08de26e0..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/0/.config/config.yaml +++ /dev/null @@ -1,73 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float16 - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 15 - warmup_runs: 10 - memory: false - energy: false - input_shapes: - batch_size: 1 - sequence_length: 200 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 - can_diffuse: ${can_diffuse:${task}} - can_generate: ${can_generate:${task}} - forward_kwargs: {} - generate_kwargs: {} -experiment_name: llama_1gpu_inference -model: fxmarty/tiny-llama-fast-tokenizer -device: cuda -task: ${infer_task:${model}} -hub_kwargs: - revision: main - cache_dir: null - 
force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/0/.config/hydra.yaml b/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/0/.config/hydra.yaml deleted file mode 100644 index 19e0f56bdddba02fba87b6f3dc478eaced43476c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,174 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,16 - backend.torch_dtype: float16,float32 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - - backend.torch_dtype=float16 - job: - name: experiment - chdir: true - override_dirname: backend.torch_dtype=float16,benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: llama2_1gpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/0/.config/overrides.yaml b/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/0/.config/overrides.yaml deleted file mode 100644 index b7fc5900f179157ac0449016dd8d9497ffd58db4..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/0/.config/overrides.yaml +++ /dev/null @@ -1,2 +0,0 @@ -- benchmark.input_shapes.batch_size=1 -- backend.torch_dtype=float16 diff --git a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/0/experiment.log b/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/0/experiment.log deleted file mode 100644 index 42ba5ae265d3a292f17fe4c61b80d37b0132f9a4..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/0/experiment.log +++ /dev/null @@ -1,27 +0,0 @@ -[2023-09-20 15:31:02,538][inference][INFO] - `new_tokens` was 
set to 200. `max_new_tokens` and `min_new_tokens` will be set to 200. -[2023-09-20 15:31:02,686][experiment][WARNING] - Multiple GPUs detected but CUDA_DEVICE_ORDER is not set. This means that code might allocate resources from the wrong GPUs even if CUDA_VISIBLE_DEVICES is set. Pytorch uses the `FASTEST_FIRST` order by default, which is not guaranteed to be the same as nvidia-smi. `CUDA_DEVICE_ORDER` will be set to `PCI_BUS_ID` to ensure that the GPUs are allocated in the same order as nvidia-smi. -[2023-09-20 15:31:06,587][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type llama -[2023-09-20 15:31:06,588][backend][INFO] - Configuring pytorch backend -[2023-09-20 15:31:06,589][backend][INFO] - + Checking initial device(s) isolation of CUDA device(s): [0] -[2023-09-20 15:31:06,718][backend][INFO] - + Checking continuous device(s) isolation of CUDA device(s): [0] -[2023-09-20 15:31:06,732][pytorch][INFO] - + Disabling gradients -[2023-09-20 15:31:06,733][pytorch][INFO] - + Loading model on device: cuda -[2023-09-20 15:31:08,118][pytorch][INFO] - + Turning on model's eval mode -[2023-09-20 15:31:08,121][benchmark][INFO] - Configuring inference benchmark -[2023-09-20 15:31:08,122][inference][INFO] - Running inference benchmark -[2023-09-20 15:31:08,122][input_generator][INFO] - Using llama model type generator -[2023-09-20 15:31:08,143][inference][INFO] - + Preparing input for the forward pass -[2023-09-20 15:31:08,143][inference][INFO] - + Warming up the forward pass -[2023-09-20 15:31:08,461][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-20 15:31:08,462][latency_tracker][INFO] - Tracked Pytorch devices: [0] -[2023-09-20 15:31:23,784][inference][INFO] - + Forward pass latency: 3.27e-03 (s) -[2023-09-20 15:31:23,787][inference][INFO] - + Forward pass throughput: 306.00 (samples/s) -[2023-09-20 15:31:23,787][inference][INFO] - + Preparing input for the generation pass -[2023-09-20 15:31:23,787][inference][INFO] - + Warming up the generation pass -[2023-09-20 15:31:25,023][inference][INFO] - + Tracking generation latency and throughput -[2023-09-20 15:31:25,024][latency_tracker][INFO] - Tracked Pytorch devices: [0] -[2023-09-20 15:31:40,316][inference][INFO] - + Generation pass latency: 5.10e-01 (s) -[2023-09-20 15:31:40,317][inference][INFO] - + Generation pass throughput: 392.00 (tokens/s) -[2023-09-20 15:31:40,317][inference][INFO] - Saving inference results -[2023-09-20 15:31:40,325][backend][INFO] - Cleaning pytorch backend -[2023-09-20 15:31:40,325][backend][INFO] - + Deleting pretrained model diff --git a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/0/hydra_config.yaml b/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/0/hydra_config.yaml deleted file mode 100644 index 82eb70a083058b7ed98a1ae91483b49ea13d5d6d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/0/hydra_config.yaml +++ /dev/null @@ -1,79 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float16 - disable_grad: true - eval_mode: true - amp_autocast: false - amp_dtype: null - 
torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 15 - warmup_runs: 10 - memory: false - energy: false - input_shapes: - batch_size: 1 - sequence_length: 200 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 - can_diffuse: false - can_generate: true - forward_kwargs: {} - generate_kwargs: - max_new_tokens: 200 - min_new_tokens: 200 - do_sample: false - use_cache: true - pad_token_id: 0 - num_beams: 1 -experiment_name: llama_1gpu_inference -model: fxmarty/tiny-llama-fast-tokenizer -device: cuda -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/0/inference_results.csv b/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/0/inference_results.csv deleted file mode 100644 index 2605a6f8658fa23f1704877e70a2235838c20e27..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,0.00327,306.0,0.51,392.0 diff --git a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/1/.config/config.yaml b/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/1/.config/config.yaml deleted file mode 100644 index 43b47967479b8340e846fdf11ba343f34adbfe5d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/1/.config/config.yaml +++ /dev/null @@ -1,73 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float32 - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 15 - warmup_runs: 10 - memory: false - energy: false - input_shapes: - batch_size: 1 - sequence_length: 200 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 - 
can_diffuse: ${can_diffuse:${task}} - can_generate: ${can_generate:${task}} - forward_kwargs: {} - generate_kwargs: {} -experiment_name: llama_1gpu_inference -model: fxmarty/tiny-llama-fast-tokenizer -device: cuda -task: ${infer_task:${model}} -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/1/.config/hydra.yaml b/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/1/.config/hydra.yaml deleted file mode 100644 index 85463777d1c962de3ecf9bf377379b911f850062..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,174 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,16 - backend.torch_dtype: float16,float32 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - - backend.torch_dtype=float32 - job: - name: experiment - chdir: true - override_dirname: backend.torch_dtype=float32,benchmark.input_shapes.batch_size=1 - id: '1' - num: 1 - config_name: llama2_1gpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/1/.config/overrides.yaml b/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/1/.config/overrides.yaml deleted file mode 100644 index 8b4741e9eb919dcf02db7f865f28d92490b262b0..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/1/.config/overrides.yaml +++ /dev/null @@ -1,2 +0,0 @@ -- benchmark.input_shapes.batch_size=1 -- backend.torch_dtype=float32 diff --git a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/1/experiment.log b/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/1/experiment.log deleted file mode 100644 index 3fa3a3ba96d6e7c9da97711849375ea3b009dbb2..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/1/experiment.log +++ /dev/null @@ -1,26 +0,0 @@ -[2023-09-20 15:31:40,987][inference][INFO] - `new_tokens` was 
set to 200. `max_new_tokens` and `min_new_tokens` will be set to 200. -[2023-09-20 15:31:41,952][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type llama -[2023-09-20 15:31:41,952][backend][INFO] - Configuring pytorch backend -[2023-09-20 15:31:41,952][backend][INFO] - + Checking initial device(s) isolation of CUDA device(s): [0] -[2023-09-20 15:31:42,071][backend][INFO] - + Checking continuous device(s) isolation of CUDA device(s): [0] -[2023-09-20 15:31:42,088][pytorch][INFO] - + Disabling gradients -[2023-09-20 15:31:42,089][pytorch][INFO] - + Loading model on device: cuda -[2023-09-20 15:31:42,465][pytorch][INFO] - + Turning on model's eval mode -[2023-09-20 15:31:42,466][benchmark][INFO] - Configuring inference benchmark -[2023-09-20 15:31:42,466][inference][INFO] - Running inference benchmark -[2023-09-20 15:31:42,466][input_generator][INFO] - Using llama model type generator -[2023-09-20 15:31:42,467][inference][INFO] - + Preparing input for the forward pass -[2023-09-20 15:31:42,467][inference][INFO] - + Warming up the forward pass -[2023-09-20 15:31:42,596][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-20 15:31:42,597][latency_tracker][INFO] - Tracked Pytorch devices: [0] -[2023-09-20 15:31:58,034][inference][INFO] - + Forward pass latency: 3.03e-03 (s) -[2023-09-20 15:31:58,037][inference][INFO] - + Forward pass throughput: 330.00 (samples/s) -[2023-09-20 15:31:58,037][inference][INFO] - + Preparing input for the generation pass -[2023-09-20 15:31:58,038][inference][INFO] - + Warming up the generation pass -[2023-09-20 15:31:58,645][inference][INFO] - + Tracking generation latency and throughput -[2023-09-20 15:31:58,646][latency_tracker][INFO] - Tracked Pytorch devices: [0] -[2023-09-20 15:32:14,273][inference][INFO] - + Generation pass latency: 6.25e-01 (s) -[2023-09-20 15:32:14,273][inference][INFO] - + Generation pass throughput: 320.00 (tokens/s) -[2023-09-20 15:32:14,273][inference][INFO] - Saving inference results -[2023-09-20 15:32:14,281][backend][INFO] - Cleaning pytorch backend -[2023-09-20 15:32:14,281][backend][INFO] - + Deleting pretrained model diff --git a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/1/hydra_config.yaml b/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/1/hydra_config.yaml deleted file mode 100644 index 74699b4dde858de23b60cd15505dc3745c7fa948..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/1/hydra_config.yaml +++ /dev/null @@ -1,79 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float32 - disable_grad: true - eval_mode: true - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 15 - warmup_runs: 10 - memory: false - energy: false - input_shapes: - batch_size: 1 - sequence_length: 200 - 
num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 - can_diffuse: false - can_generate: true - forward_kwargs: {} - generate_kwargs: - max_new_tokens: 200 - min_new_tokens: 200 - do_sample: false - use_cache: true - pad_token_id: 0 - num_beams: 1 -experiment_name: llama_1gpu_inference -model: fxmarty/tiny-llama-fast-tokenizer -device: cuda -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/1/inference_results.csv b/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/1/inference_results.csv deleted file mode 100644 index a5ebce77fb663eafec892b916d6a6befa2377649..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,0.00303,330.0,0.625,320.0 diff --git a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/2/.config/config.yaml b/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/2/.config/config.yaml deleted file mode 100644 index 4977442d8bcc23fcf97d0e094e48173757e090dd..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/2/.config/config.yaml +++ /dev/null @@ -1,73 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float16 - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 15 - warmup_runs: 10 - memory: false - energy: false - input_shapes: - batch_size: 16 - sequence_length: 200 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 - can_diffuse: ${can_diffuse:${task}} - can_generate: ${can_generate:${task}} - forward_kwargs: {} - generate_kwargs: {} -experiment_name: llama_1gpu_inference -model: fxmarty/tiny-llama-fast-tokenizer -device: cuda -task: ${infer_task:${model}} -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - 
accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/2/.config/hydra.yaml b/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/2/.config/hydra.yaml deleted file mode 100644 index fab2a10789546df686bcd821ddf4f9b2bed16a12..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/2/.config/hydra.yaml +++ /dev/null @@ -1,174 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,16 - backend.torch_dtype: float16,float32 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=16 - - backend.torch_dtype=float16 - job: - name: experiment - chdir: true - override_dirname: backend.torch_dtype=float16,benchmark.input_shapes.batch_size=16 - id: '2' - num: 2 - config_name: llama2_1gpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/2 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/2/.config/overrides.yaml b/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/2/.config/overrides.yaml deleted file mode 100644 index 67f6580c5dd56716f1c23e4e90698198799b499f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/2/.config/overrides.yaml +++ /dev/null @@ -1,2 +0,0 @@ -- benchmark.input_shapes.batch_size=16 -- backend.torch_dtype=float16 diff --git a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/2/experiment.log b/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/2/experiment.log deleted file mode 100644 index b8e4a6bdf5c570de687aa373a15ac803e13d82c7..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/2/experiment.log +++ /dev/null @@ -1,26 +0,0 @@ -[2023-09-20 15:32:14,916][inference][INFO] - `new_tokens` 
was set to 200. `max_new_tokens` and `min_new_tokens` will be set to 200. -[2023-09-20 15:32:15,873][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type llama -[2023-09-20 15:32:15,873][backend][INFO] - Configuring pytorch backend -[2023-09-20 15:32:15,874][backend][INFO] - + Checking initial device(s) isolation of CUDA device(s): [0] -[2023-09-20 15:32:15,990][backend][INFO] - + Checking continuous device(s) isolation of CUDA device(s): [0] -[2023-09-20 15:32:16,009][pytorch][INFO] - + Disabling gradients -[2023-09-20 15:32:16,010][pytorch][INFO] - + Loading model on device: cuda -[2023-09-20 15:32:16,374][pytorch][INFO] - + Turning on model's eval mode -[2023-09-20 15:32:16,374][benchmark][INFO] - Configuring inference benchmark -[2023-09-20 15:32:16,375][inference][INFO] - Running inference benchmark -[2023-09-20 15:32:16,375][input_generator][INFO] - Using llama model type generator -[2023-09-20 15:32:16,375][inference][INFO] - + Preparing input for the forward pass -[2023-09-20 15:32:16,442][inference][INFO] - + Warming up the forward pass -[2023-09-20 15:32:16,464][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-20 15:32:16,465][latency_tracker][INFO] - Tracked Pytorch devices: [0] -[2023-09-20 15:32:31,829][inference][INFO] - + Forward pass latency: 6.04e-03 (s) -[2023-09-20 15:32:31,831][inference][INFO] - + Forward pass throughput: 2650.00 (samples/s) -[2023-09-20 15:32:31,831][inference][INFO] - + Preparing input for the generation pass -[2023-09-20 15:32:31,831][inference][INFO] - + Warming up the generation pass -[2023-09-20 15:32:33,507][inference][INFO] - + Tracking generation latency and throughput -[2023-09-20 15:32:33,507][latency_tracker][INFO] - Tracked Pytorch devices: [0] -[2023-09-20 15:32:48,510][inference][INFO] - + Generation pass latency: 7.90e-01 (s) -[2023-09-20 15:32:48,510][inference][INFO] - + Generation pass throughput: 4050.00 (tokens/s) -[2023-09-20 15:32:48,510][inference][INFO] - Saving inference results -[2023-09-20 15:32:48,515][backend][INFO] - Cleaning pytorch backend -[2023-09-20 15:32:48,515][backend][INFO] - + Deleting pretrained model diff --git a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/2/hydra_config.yaml b/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/2/hydra_config.yaml deleted file mode 100644 index 46e7c942b1f98367f69538013206b4b487b0db82..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/2/hydra_config.yaml +++ /dev/null @@ -1,79 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float16 - disable_grad: true - eval_mode: true - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 15 - warmup_runs: 10 - memory: false - energy: false - input_shapes: - batch_size: 16 - sequence_length: 
200 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 - can_diffuse: false - can_generate: true - forward_kwargs: {} - generate_kwargs: - max_new_tokens: 200 - min_new_tokens: 200 - do_sample: false - use_cache: true - pad_token_id: 0 - num_beams: 1 -experiment_name: llama_1gpu_inference -model: fxmarty/tiny-llama-fast-tokenizer -device: cuda -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/2/inference_results.csv b/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/2/inference_results.csv deleted file mode 100644 index 993ff35deb452fe19baa81bff816264369bf31a4..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/2/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,0.00604,2650.0,0.79,4050.0 diff --git a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/3/.config/config.yaml b/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/3/.config/config.yaml deleted file mode 100644 index 75a2cec740a09f8fed297594d9998531f5ae4881..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/3/.config/config.yaml +++ /dev/null @@ -1,73 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float32 - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 15 - warmup_runs: 10 - memory: false - energy: false - input_shapes: - batch_size: 16 - sequence_length: 200 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 - can_diffuse: ${can_diffuse:${task}} - can_generate: ${can_generate:${task}} - forward_kwargs: {} - generate_kwargs: {} -experiment_name: llama_1gpu_inference -model: fxmarty/tiny-llama-fast-tokenizer -device: cuda -task: ${infer_task:${model}} -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - 
accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/3/.config/hydra.yaml b/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/3/.config/hydra.yaml deleted file mode 100644 index 44920f5ad3b553fa1313115090ebe48b0d9844d8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/3/.config/hydra.yaml +++ /dev/null @@ -1,174 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,16 - backend.torch_dtype: float16,float32 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=16 - - backend.torch_dtype=float32 - job: - name: experiment - chdir: true - override_dirname: backend.torch_dtype=float32,benchmark.input_shapes.batch_size=16 - id: '3' - num: 3 - config_name: llama2_1gpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/3 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/3/.config/overrides.yaml b/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/3/.config/overrides.yaml deleted file mode 100644 index 9d69bb0ed87b5cb93e4c4e5d5b820de170746b79..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/3/.config/overrides.yaml +++ /dev/null @@ -1,2 +0,0 @@ -- benchmark.input_shapes.batch_size=16 -- backend.torch_dtype=float32 diff --git a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/3/experiment.log b/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/3/experiment.log deleted file mode 100644 index 7f23ade96f7e539cf8484d63074ef48b21e17e14..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/3/experiment.log +++ /dev/null @@ -1,26 +0,0 @@ -[2023-09-20 15:32:49,161][inference][INFO] - `new_tokens` 
was set to 200. `max_new_tokens` and `min_new_tokens` will be set to 200. -[2023-09-20 15:32:50,176][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type llama -[2023-09-20 15:32:50,176][backend][INFO] - Configuring pytorch backend -[2023-09-20 15:32:50,176][backend][INFO] - + Checking initial device(s) isolation of CUDA device(s): [0] -[2023-09-20 15:32:50,295][backend][INFO] - + Checking continuous device(s) isolation of CUDA device(s): [0] -[2023-09-20 15:32:50,315][pytorch][INFO] - + Disabling gradients -[2023-09-20 15:32:50,317][pytorch][INFO] - + Loading model on device: cuda -[2023-09-20 15:32:50,694][pytorch][INFO] - + Turning on model's eval mode -[2023-09-20 15:32:50,694][benchmark][INFO] - Configuring inference benchmark -[2023-09-20 15:32:50,694][inference][INFO] - Running inference benchmark -[2023-09-20 15:32:50,694][input_generator][INFO] - Using llama model type generator -[2023-09-20 15:32:50,695][inference][INFO] - + Preparing input for the forward pass -[2023-09-20 15:32:50,702][inference][INFO] - + Warming up the forward pass -[2023-09-20 15:32:50,724][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-20 15:32:50,724][latency_tracker][INFO] - Tracked Pytorch devices: [0] -[2023-09-20 15:33:06,380][inference][INFO] - + Forward pass latency: 5.57e-03 (s) -[2023-09-20 15:33:06,381][inference][INFO] - + Forward pass throughput: 2870.00 (samples/s) -[2023-09-20 15:33:06,382][inference][INFO] - + Preparing input for the generation pass -[2023-09-20 15:33:06,382][inference][INFO] - + Warming up the generation pass -[2023-09-20 15:33:07,103][inference][INFO] - + Tracking generation latency and throughput -[2023-09-20 15:33:07,103][latency_tracker][INFO] - Tracked Pytorch devices: [0] -[2023-09-20 15:33:22,530][inference][INFO] - + Generation pass latency: 9.07e-01 (s) -[2023-09-20 15:33:22,531][inference][INFO] - + Generation pass throughput: 3530.00 (tokens/s) -[2023-09-20 15:33:22,531][inference][INFO] - Saving inference results -[2023-09-20 15:33:22,536][backend][INFO] - Cleaning pytorch backend -[2023-09-20 15:33:22,536][backend][INFO] - + Deleting pretrained model diff --git a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/3/hydra_config.yaml b/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/3/hydra_config.yaml deleted file mode 100644 index 2404fd246618d0489ff3ba123302854d0830756a..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/3/hydra_config.yaml +++ /dev/null @@ -1,79 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float32 - disable_grad: true - eval_mode: true - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 15 - warmup_runs: 10 - memory: false - energy: false - input_shapes: - batch_size: 16 - sequence_length: 
200 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 - can_diffuse: false - can_generate: true - forward_kwargs: {} - generate_kwargs: - max_new_tokens: 200 - min_new_tokens: 200 - do_sample: false - use_cache: true - pad_token_id: 0 - num_beams: 1 -experiment_name: llama_1gpu_inference -model: fxmarty/tiny-llama-fast-tokenizer -device: cuda -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/3/inference_results.csv b/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/3/inference_results.csv deleted file mode 100644 index f33036c8d4f11e352eebf63e31e1560ded591cdd..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/llama_1gpu_inference/3/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,0.00557,2870.0,0.907,3530.0 diff --git a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index b0c83089a2621f9950400d418a85b4376221df44..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,73 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 5 - warmup_runs: 10 - memory: true - energy: false - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: null - can_diffuse: ${can_diffuse:${task}} - can_generate: ${can_generate:${task}} - forward_kwargs: {} - generate_kwargs: {} -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - 
accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index b57687b9ca9bcd9c5c8ab46e80fea240d989d57d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: experiment - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/pytorch_bert_inference/0/experiment.log b/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/pytorch_bert_inference/0/experiment.log deleted file mode 100644 index de102680270952fd40f73831b8225b8acc662480..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/pytorch_bert_inference/0/experiment.log +++ /dev/null @@ -1,18 +0,0 @@ -[2023-09-20 15:30:25,292][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and 
model_type bert -[2023-09-20 15:30:25,292][backend][INFO] - Configuring pytorch backend -[2023-09-20 15:30:25,294][pytorch][INFO] - + Disabling gradients -[2023-09-20 15:30:25,295][pytorch][INFO] - + Loading model on device: cpu -[2023-09-20 15:30:26,417][pytorch][INFO] - + Turning on model's eval mode -[2023-09-20 15:30:26,424][benchmark][INFO] - Configuring inference benchmark -[2023-09-20 15:30:26,424][inference][INFO] - Running inference benchmark -[2023-09-20 15:30:26,424][input_generator][INFO] - Using bert model type generator -[2023-09-20 15:30:26,425][inference][INFO] - + Preparing input for the forward pass -[2023-09-20 15:30:26,425][inference][INFO] - + Warming up the forward pass -[2023-09-20 15:30:26,715][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-20 15:30:31,751][inference][INFO] - + Forward pass latency: 3.70e-03 (s) -[2023-09-20 15:30:31,752][inference][INFO] - + Forward pass throughput: 270.00 (samples/s) -[2023-09-20 15:30:31,752][inference][INFO] - + Tracking forward pass peak memory -[2023-09-20 15:30:31,789][inference][INFO] - + Forward pass peak memory: 551 (MB) -[2023-09-20 15:30:31,789][inference][INFO] - Saving inference results -[2023-09-20 15:30:31,795][backend][INFO] - Cleaning pytorch backend -[2023-09-20 15:30:31,795][backend][INFO] - + Deleting pretrained model diff --git a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 8159f39871a74c9f18130d7533a9047d427de8e1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,73 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: null - disable_grad: true - eval_mode: true - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 5 - warmup_runs: 10 - memory: true - energy: false - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: null - can_diffuse: false - can_generate: false - forward_kwargs: {} - generate_kwargs: {} -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git 
a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index d94721f8b8e299aab00a2c7ca10f4f373ae66d8b..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.latency(s),forward.throughput(samples/s),forward.peak_memory(MB) -0,0.0037,270.0,551 diff --git a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index cf6b2100f521049a9838a290716b3948d38db210..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,73 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 5 - warmup_runs: 10 - memory: true - energy: false - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: null - can_diffuse: ${can_diffuse:${task}} - can_generate: ${can_generate:${task}} - forward_kwargs: {} - generate_kwargs: {} -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index cd7e246f591352ca39e4c489c303b987fc1e0066..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: experiment - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/pytorch_bert_inference/1/experiment.log b/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/pytorch_bert_inference/1/experiment.log deleted file mode 100644 index 49cf1fd5b944930951d6676958d81fba53146edb..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/pytorch_bert_inference/1/experiment.log +++ /dev/null @@ -1,18 +0,0 @@ -[2023-09-20 15:30:33,253][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-20 15:30:33,253][backend][INFO] - Configuring pytorch backend -[2023-09-20 15:30:33,254][pytorch][INFO] - + Disabling gradients -[2023-09-20 15:30:33,255][pytorch][INFO] - + Loading model on device: cpu -[2023-09-20 15:30:33,456][pytorch][INFO] - + Turning on model's eval mode -[2023-09-20 15:30:33,456][benchmark][INFO] - Configuring inference benchmark -[2023-09-20 15:30:33,456][inference][INFO] - Running inference benchmark -[2023-09-20 15:30:33,456][input_generator][INFO] - Using bert model type generator -[2023-09-20 15:30:33,457][inference][INFO] - + Preparing input for the forward pass -[2023-09-20 15:30:33,457][inference][INFO] - + Warming up the forward pass -[2023-09-20 15:30:33,499][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-20 15:30:38,530][inference][INFO] - + Forward pass latency: 3.95e-03 (s) -[2023-09-20 15:30:38,531][inference][INFO] - + Forward pass throughput: 1010.00 (samples/s) -[2023-09-20 15:30:38,531][inference][INFO] - + Tracking forward pass peak memory -[2023-09-20 15:30:38,583][inference][INFO] - + Forward pass peak memory: 554 (MB) -[2023-09-20 15:30:38,584][inference][INFO] - Saving inference results -[2023-09-20 15:30:38,588][backend][INFO] - Cleaning pytorch backend -[2023-09-20 15:30:38,588][backend][INFO] - + Deleting pretrained model diff --git a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 00d5f274372d051cb01e74f2a4b26b9ac2511e68..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,73 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: null - disable_grad: true - eval_mode: true - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - 
quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 5 - warmup_runs: 10 - memory: true - energy: false - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: null - can_diffuse: false - can_generate: false - forward_kwargs: {} - generate_kwargs: {} -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 0c5e9381821b6e27b9dff77922918a4e212553f2..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.latency(s),forward.throughput(samples/s),forward.peak_memory(MB) -0,0.00395,1010.0,554 diff --git a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index aa8ba5cebbb00543dea83d42e1a3242be877425e..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,73 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 5 - warmup_runs: 10 - memory: true - energy: false - input_shapes: - batch_size: 2 - sequence_length: 16 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: null - can_diffuse: ${can_diffuse:${task}} - can_generate: ${can_generate:${task}} - forward_kwargs: {} - generate_kwargs: {} -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 
-device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 2883b65e42fd3059fdc05503d136ec543aa6ed05..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: experiment - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/pytorch_gpt2_inference/0/experiment.log b/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/pytorch_gpt2_inference/0/experiment.log deleted file mode 100644 index 7438c0a607de8f7ec20db9b592a6b6f2ddd84f84..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/pytorch_gpt2_inference/0/experiment.log +++ /dev/null @@ -1,25 +0,0 @@ -[2023-09-20 15:30:46,096][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-20 15:30:46,096][backend][INFO] - Configuring pytorch backend -[2023-09-20 15:30:46,098][pytorch][INFO] 
- + Disabling gradients -[2023-09-20 15:30:46,098][pytorch][INFO] - + Loading model on device: cpu -[2023-09-20 15:30:47,545][pytorch][INFO] - + Turning on model's eval mode -[2023-09-20 15:30:47,549][benchmark][INFO] - Configuring inference benchmark -[2023-09-20 15:30:47,549][inference][INFO] - Running inference benchmark -[2023-09-20 15:30:47,549][input_generator][INFO] - Using gpt2 model type generator -[2023-09-20 15:30:47,550][inference][INFO] - + Preparing input for the forward pass -[2023-09-20 15:30:47,550][inference][INFO] - + Warming up the forward pass -[2023-09-20 15:30:47,790][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-20 15:30:52,827][inference][INFO] - + Forward pass latency: 3.44e-03 (s) -[2023-09-20 15:30:52,829][inference][INFO] - + Forward pass throughput: 581.00 (samples/s) -[2023-09-20 15:30:52,829][inference][INFO] - + Tracking forward pass peak memory -[2023-09-20 15:30:52,875][inference][INFO] - + Forward pass peak memory: 555 (MB) -[2023-09-20 15:30:52,876][inference][INFO] - + Preparing input for the generation pass -[2023-09-20 15:30:52,876][inference][INFO] - + Warming up the generation pass -[2023-09-20 15:30:53,188][inference][INFO] - + Tracking generation latency and throughput -[2023-09-20 15:30:58,396][inference][INFO] - + Generation pass latency: 3.06e-01 (s) -[2023-09-20 15:30:58,397][inference][INFO] - + Generation pass throughput: 654.00 (tokens/s) -[2023-09-20 15:30:58,397][inference][INFO] - + Tracking generation pass peak memory -[2023-09-20 15:30:58,772][inference][INFO] - + Generation pass peak memory: 559 (MB) -[2023-09-20 15:30:58,772][inference][INFO] - Saving inference results -[2023-09-20 15:30:58,779][backend][INFO] - Cleaning pytorch backend -[2023-09-20 15:30:58,779][backend][INFO] - + Deleting pretrained model diff --git a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index f46fa028eedfeac3a6b4c27f9b2b7fe54502d316..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,79 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: null - disable_grad: true - eval_mode: true - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 5 - warmup_runs: 10 - memory: true - energy: false - input_shapes: - batch_size: 2 - sequence_length: 16 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: null - can_diffuse: false - can_generate: true - forward_kwargs: {} - generate_kwargs: - max_new_tokens: 100 - min_new_tokens: 100 - do_sample: false - use_cache: true - pad_token_id: 0 - num_beams: 1 -experiment_name: pytorch_gpt2_inference -model: 
hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 3e3987bdbb59fc20399f7735c4af75f1d16d5d6c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_14:48:35_00247ea0dec9b2219a43973a2d90c059dfa1df17/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.latency(s),forward.throughput(samples/s),forward.peak_memory(MB),generate.latency(s),generate.throughput(tokens/s),generate.peak_memory(MB) -0,0.00344,581.0,555,0.306,654.0,559 diff --git a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/0/.config/config.yaml b/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/0/.config/config.yaml deleted file mode 100644 index 833d05497b59ba26a18c6a302d0bb91fe2b458ce..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/0/.config/config.yaml +++ /dev/null @@ -1,75 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float16 - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: training - _target_: optimum_benchmark.benchmarks.training.benchmark.TrainingBenchmark - warmup_steps: 40 - dataset_shapes: - dataset_size: 2000 - sequence_length: 273 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - training_arguments: - skip_memory_metrics: true - output_dir: ./trainer_output - use_cpu: ${is_cpu:${device}} - ddp_find_unused_parameters: false - do_train: true - do_eval: false - do_predict: false - report_to: none - per_device_train_batch_size: 16 -experiment_name: bert_1gpu_training -model: bert-base-uncased -device: cuda -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - 
Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/0/.config/hydra.yaml b/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/0/.config/hydra.yaml deleted file mode 100644 index 84950fbb704705a4858876d346f5780c3ce0bc52..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/0/.config/hydra.yaml +++ /dev/null @@ -1,174 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - +benchmark.training_arguments.per_device_train_batch_size: 16,32 - backend.torch_dtype: float16,float32 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - +benchmark.training_arguments.per_device_train_batch_size=16 - - backend.torch_dtype=float16 - job: - name: experiment - chdir: true - override_dirname: +benchmark.training_arguments.per_device_train_batch_size=16,backend.torch_dtype=float16 - id: '0' - num: 0 - config_name: bert_1gpu_training - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/0 - choices: - benchmark: training - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/0/.config/overrides.yaml b/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/0/.config/overrides.yaml deleted file mode 100644 index 8d6d8f16f84a50b1a79c5e37697d3e62129d7306..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/0/.config/overrides.yaml +++ /dev/null @@ -1,2 +0,0 @@ -- +benchmark.training_arguments.per_device_train_batch_size=16 -- backend.torch_dtype=float16 diff --git a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/0/experiment.log b/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/0/experiment.log deleted file mode 100644 index ac5737192790066ec128321798126c050e0f1c96..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/0/experiment.log +++ /dev/null @@ -1,17 +0,0 @@ 
-[2023-09-20 15:58:18,137][experiment][WARNING] - Multiple GPUs detected but CUDA_DEVICE_ORDER is not set. This means that code might allocate resources from the wrong GPUs even if CUDA_VISIBLE_DEVICES is set. Pytorch uses the `FASTEST_FIRST` order by default, which is not guaranteed to be the same as nvidia-smi. `CUDA_DEVICE_ORDER` will be set to `PCI_BUS_ID` to ensure that the GPUs are allocated in the same order as nvidia-smi. -[2023-09-20 15:58:21,983][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-20 15:58:21,983][backend][INFO] - Configuring pytorch backend -[2023-09-20 15:58:21,986][backend][INFO] - + Checking initial device(s) isolation of CUDA device(s): [0] -[2023-09-20 15:58:22,112][backend][INFO] - + Checking continuous device(s) isolation of CUDA device(s): [0] -[2023-09-20 15:58:22,124][pytorch][INFO] - + Loading model on device: cuda -[2023-09-20 15:58:30,810][benchmark][INFO] - Configuring training benchmark -[2023-09-20 15:58:30,810][training][INFO] - Running training benchmark -[2023-09-20 15:58:30,811][dataset_generator][INFO] - Using text-classification task generator -[2023-09-20 15:58:30,870][pytorch][INFO] - + Setting dataset format to `torch`. -[2023-09-20 15:58:30,870][pytorch][INFO] - + Wrapping training arguments with transformers.TrainingArguments -[2023-09-20 15:58:30,871][pytorch][INFO] - + Wrapping model with transformers.Trainer -[2023-09-20 15:58:30,876][pytorch][INFO] - + Starting training -[2023-09-20 15:59:06,186][pytorch][INFO] - + Training finished successfully -[2023-09-20 15:59:06,187][training][INFO] - Saving training results -[2023-09-20 15:59:06,190][backend][INFO] - Cleaning pytorch backend -[2023-09-20 15:59:06,190][backend][INFO] - + Deleting pretrained model diff --git a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/0/hydra_config.yaml b/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/0/hydra_config.yaml deleted file mode 100644 index a75f0da9b0aa0151767b6e6858f2f15bc4de54c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/0/hydra_config.yaml +++ /dev/null @@ -1,75 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float16 - disable_grad: false - eval_mode: false - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: training - _target_: optimum_benchmark.benchmarks.training.benchmark.TrainingBenchmark - warmup_steps: 40 - dataset_shapes: - dataset_size: 2000 - sequence_length: 273 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - training_arguments: - skip_memory_metrics: true - output_dir: ./trainer_output - use_cpu: false - ddp_find_unused_parameters: false - do_train: true - do_eval: false - do_predict: false - report_to: none - per_device_train_batch_size: 16 -experiment_name: bert_1gpu_training -model: bert-base-uncased -device: cuda -task: 
text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/0/training_results.csv b/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/0/training_results.csv deleted file mode 100644 index 1b4f01e5ff1da6a0d3b2482aa162102569e3801a..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/0/training_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -warmup.runtime(s),warmup.throughput(samples/s),training.runtime(s),training.throughput(samples/s),overall_training.runtime(s),overall_training.throughput(samples/s) -4.273942232131958,149.74465382063684,30.926475286483765,173.3142865570121,35.20041847229004,152.2709170125185 diff --git a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/1/.config/config.yaml b/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/1/.config/config.yaml deleted file mode 100644 index 8d5c88f6241de713f22a4fbaee0c5bb7f23f5dfa..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/1/.config/config.yaml +++ /dev/null @@ -1,75 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float32 - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: training - _target_: optimum_benchmark.benchmarks.training.benchmark.TrainingBenchmark - warmup_steps: 40 - dataset_shapes: - dataset_size: 2000 - sequence_length: 273 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - training_arguments: - skip_memory_metrics: true - output_dir: ./trainer_output - use_cpu: ${is_cpu:${device}} - ddp_find_unused_parameters: false - do_train: true - do_eval: false - do_predict: false - report_to: none - per_device_train_batch_size: 16 -experiment_name: bert_1gpu_training -model: bert-base-uncased -device: cuda -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - 
Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/1/.config/hydra.yaml b/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/1/.config/hydra.yaml deleted file mode 100644 index 8f4afc901d2a9c371f2fb4d3800f74a294e7c055..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/1/.config/hydra.yaml +++ /dev/null @@ -1,174 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - +benchmark.training_arguments.per_device_train_batch_size: 16,32 - backend.torch_dtype: float16,float32 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - +benchmark.training_arguments.per_device_train_batch_size=16 - - backend.torch_dtype=float32 - job: - name: experiment - chdir: true - override_dirname: +benchmark.training_arguments.per_device_train_batch_size=16,backend.torch_dtype=float32 - id: '1' - num: 1 - config_name: bert_1gpu_training - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/1 - choices: - benchmark: training - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/1/.config/overrides.yaml b/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/1/.config/overrides.yaml deleted file mode 100644 index ec1fe39ea16e445c5d7092a8b3e071d9b0c55522..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/1/.config/overrides.yaml +++ /dev/null @@ -1,2 +0,0 @@ -- +benchmark.training_arguments.per_device_train_batch_size=16 -- backend.torch_dtype=float32 diff --git a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/1/experiment.log b/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/1/experiment.log deleted file mode 100644 index c58267adf283abbf2d65d2fbd2a7e5841cf34154..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/1/experiment.log +++ /dev/null @@ -1,16 +0,0 @@ 
-[2023-09-20 15:59:07,755][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-20 15:59:07,756][backend][INFO] - Configuring pytorch backend -[2023-09-20 15:59:07,756][backend][INFO] - + Checking initial device(s) isolation of CUDA device(s): [0] -[2023-09-20 15:59:07,876][backend][INFO] - + Checking continuous device(s) isolation of CUDA device(s): [0] -[2023-09-20 15:59:07,888][pytorch][INFO] - + Loading model on device: cuda -[2023-09-20 15:59:08,551][benchmark][INFO] - Configuring training benchmark -[2023-09-20 15:59:08,552][training][INFO] - Running training benchmark -[2023-09-20 15:59:08,552][dataset_generator][INFO] - Using text-classification task generator -[2023-09-20 15:59:08,591][pytorch][INFO] - + Setting dataset format to `torch`. -[2023-09-20 15:59:08,591][pytorch][INFO] - + Wrapping training arguments with transformers.TrainingArguments -[2023-09-20 15:59:08,592][pytorch][INFO] - + Wrapping model with transformers.Trainer -[2023-09-20 15:59:08,597][pytorch][INFO] - + Starting training -[2023-09-20 16:00:22,922][pytorch][INFO] - + Training finished successfully -[2023-09-20 16:00:22,923][training][INFO] - Saving training results -[2023-09-20 16:00:22,925][backend][INFO] - Cleaning pytorch backend -[2023-09-20 16:00:22,925][backend][INFO] - + Deleting pretrained model diff --git a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/1/hydra_config.yaml b/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/1/hydra_config.yaml deleted file mode 100644 index abcce5eda3bfbb21561ba578be643fe874f4ee2c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/1/hydra_config.yaml +++ /dev/null @@ -1,75 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float32 - disable_grad: false - eval_mode: false - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: training - _target_: optimum_benchmark.benchmarks.training.benchmark.TrainingBenchmark - warmup_steps: 40 - dataset_shapes: - dataset_size: 2000 - sequence_length: 273 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - training_arguments: - skip_memory_metrics: true - output_dir: ./trainer_output - use_cpu: false - ddp_find_unused_parameters: false - do_train: true - do_eval: false - do_predict: false - report_to: none - per_device_train_batch_size: 16 -experiment_name: bert_1gpu_training -model: bert-base-uncased -device: cuda -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct 
MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/1/training_results.csv b/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/1/training_results.csv deleted file mode 100644 index 47413742ad1b52db22868f8341c268643a1fa1e4..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/1/training_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -warmup.runtime(s),warmup.throughput(samples/s),training.runtime(s),training.throughput(samples/s),overall_training.runtime(s),overall_training.throughput(samples/s) -8.018710851669312,79.81332808212765,66.19549345970154,80.97227952931658,74.21420788764954,72.22336736537468 diff --git a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/2/.config/config.yaml b/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/2/.config/config.yaml deleted file mode 100644 index e7ef28c956bde8dedcb111288d0157eca31b1c62..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/2/.config/config.yaml +++ /dev/null @@ -1,75 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float16 - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: training - _target_: optimum_benchmark.benchmarks.training.benchmark.TrainingBenchmark - warmup_steps: 40 - dataset_shapes: - dataset_size: 2000 - sequence_length: 273 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - training_arguments: - skip_memory_metrics: true - output_dir: ./trainer_output - use_cpu: ${is_cpu:${device}} - ddp_find_unused_parameters: false - do_train: true - do_eval: false - do_predict: false - report_to: none - per_device_train_batch_size: 32 -experiment_name: bert_1gpu_training -model: bert-base-uncased -device: cuda -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/2/.config/hydra.yaml b/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/2/.config/hydra.yaml deleted file mode 100644 index 
4f66430e533996875d5cb68d2e4709f91b15a10e..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/2/.config/hydra.yaml +++ /dev/null @@ -1,174 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - +benchmark.training_arguments.per_device_train_batch_size: 16,32 - backend.torch_dtype: float16,float32 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - +benchmark.training_arguments.per_device_train_batch_size=32 - - backend.torch_dtype=float16 - job: - name: experiment - chdir: true - override_dirname: +benchmark.training_arguments.per_device_train_batch_size=32,backend.torch_dtype=float16 - id: '2' - num: 2 - config_name: bert_1gpu_training - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: 
command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/2 - choices: - benchmark: training - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/2/.config/overrides.yaml b/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/2/.config/overrides.yaml deleted file mode 100644 index d5e7aac495a5978edf9851ed4029a4f4f4de707b..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/2/.config/overrides.yaml +++ /dev/null @@ -1,2 +0,0 @@ -- +benchmark.training_arguments.per_device_train_batch_size=32 -- backend.torch_dtype=float16 diff --git a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/2/experiment.log b/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/2/experiment.log deleted file mode 100644 index 36b62062a91da815e9107f3618365d83ea24e075..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/2/experiment.log +++ /dev/null @@ -1,16 +0,0 @@ -[2023-09-20 16:00:24,453][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-20 16:00:24,453][backend][INFO] - Configuring pytorch backend -[2023-09-20 16:00:24,454][backend][INFO] - + Checking initial device(s) isolation of CUDA device(s): [0] -[2023-09-20 16:00:24,573][backend][INFO] - + Checking continuous device(s) isolation of CUDA device(s): [0] -[2023-09-20 16:00:24,586][pytorch][INFO] - + Loading model on device: cuda -[2023-09-20 16:00:25,052][benchmark][INFO] - Configuring training benchmark -[2023-09-20 16:00:25,052][training][INFO] - Running training benchmark -[2023-09-20 16:00:25,052][dataset_generator][INFO] - Using text-classification task generator -[2023-09-20 16:00:25,090][pytorch][INFO] - + Setting dataset format to `torch`. 
-[2023-09-20 16:00:25,091][pytorch][INFO] - + Wrapping training arguments with transformers.TrainingArguments -[2023-09-20 16:00:25,091][pytorch][INFO] - + Wrapping model with transformers.Trainer -[2023-09-20 16:00:25,096][pytorch][INFO] - + Starting training -[2023-09-20 16:01:09,961][pytorch][INFO] - + Training finished successfully -[2023-09-20 16:01:09,962][training][INFO] - Saving training results -[2023-09-20 16:01:09,964][backend][INFO] - Cleaning pytorch backend -[2023-09-20 16:01:09,964][backend][INFO] - + Deleting pretrained model diff --git a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/2/hydra_config.yaml b/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/2/hydra_config.yaml deleted file mode 100644 index af5e494ca3514c36d5e090f1af921bd6e849a818..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/2/hydra_config.yaml +++ /dev/null @@ -1,75 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float16 - disable_grad: false - eval_mode: false - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: training - _target_: optimum_benchmark.benchmarks.training.benchmark.TrainingBenchmark - warmup_steps: 40 - dataset_shapes: - dataset_size: 2000 - sequence_length: 273 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - training_arguments: - skip_memory_metrics: true - output_dir: ./trainer_output - use_cpu: false - ddp_find_unused_parameters: false - do_train: true - do_eval: false - do_predict: false - report_to: none - per_device_train_batch_size: 32 -experiment_name: bert_1gpu_training -model: bert-base-uncased -device: cuda -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/2/training_results.csv b/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/2/training_results.csv deleted file mode 100644 index 61279ec2d83bfe8373edb98fee9228f1a8056379..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/2/training_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -warmup.runtime(s),warmup.throughput(samples/s),training.runtime(s),training.throughput(samples/s),overall_training.runtime(s),overall_training.throughput(samples/s) 
-9.764516115188599,131.08688489017638,34.9902446269989,136.2665523155832,44.754762411117554,106.53614818018961 diff --git a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/3/.config/config.yaml b/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/3/.config/config.yaml deleted file mode 100644 index bf1190e8785c39d372b5122d20df36f210ecf85f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/3/.config/config.yaml +++ /dev/null @@ -1,75 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float32 - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: training - _target_: optimum_benchmark.benchmarks.training.benchmark.TrainingBenchmark - warmup_steps: 40 - dataset_shapes: - dataset_size: 2000 - sequence_length: 273 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - training_arguments: - skip_memory_metrics: true - output_dir: ./trainer_output - use_cpu: ${is_cpu:${device}} - ddp_find_unused_parameters: false - do_train: true - do_eval: false - do_predict: false - report_to: none - per_device_train_batch_size: 32 -experiment_name: bert_1gpu_training -model: bert-base-uncased -device: cuda -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/3/.config/hydra.yaml b/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/3/.config/hydra.yaml deleted file mode 100644 index 121855a9491cfab304b002204229ec4feb3b3208..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/3/.config/hydra.yaml +++ /dev/null @@ -1,174 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - +benchmark.training_arguments.per_device_train_batch_size: 16,32 - backend.torch_dtype: float16,float32 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by 
Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - +benchmark.training_arguments.per_device_train_batch_size=32 - - backend.torch_dtype=float32 - job: - name: experiment - chdir: true - override_dirname: +benchmark.training_arguments.per_device_train_batch_size=32,backend.torch_dtype=float32 - id: '3' - num: 3 - config_name: bert_1gpu_training - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/3 - choices: - benchmark: training - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/3/.config/overrides.yaml b/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/3/.config/overrides.yaml deleted file mode 100644 index 
f92a32ab90ab68f9e88427057fcb5ef3cbec936b..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/3/.config/overrides.yaml +++ /dev/null @@ -1,2 +0,0 @@ -- +benchmark.training_arguments.per_device_train_batch_size=32 -- backend.torch_dtype=float32 diff --git a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/3/experiment.log b/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/3/experiment.log deleted file mode 100644 index 6dd2f7e44e1e451d72e2d43b39008503d8d175f4..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/3/experiment.log +++ /dev/null @@ -1,16 +0,0 @@ -[2023-09-20 16:01:11,534][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-20 16:01:11,534][backend][INFO] - Configuring pytorch backend -[2023-09-20 16:01:11,534][backend][INFO] - + Checking initial device(s) isolation of CUDA device(s): [0] -[2023-09-20 16:01:11,655][backend][INFO] - + Checking continuous device(s) isolation of CUDA device(s): [0] -[2023-09-20 16:01:11,668][pytorch][INFO] - + Loading model on device: cuda -[2023-09-20 16:01:12,174][benchmark][INFO] - Configuring training benchmark -[2023-09-20 16:01:12,175][training][INFO] - Running training benchmark -[2023-09-20 16:01:12,175][dataset_generator][INFO] - Using text-classification task generator -[2023-09-20 16:01:12,213][pytorch][INFO] - + Setting dataset format to `torch`. -[2023-09-20 16:01:12,213][pytorch][INFO] - + Wrapping training arguments with transformers.TrainingArguments -[2023-09-20 16:01:12,214][pytorch][INFO] - + Wrapping model with transformers.Trainer -[2023-09-20 16:01:12,219][pytorch][INFO] - + Starting training -[2023-09-20 16:02:40,833][pytorch][INFO] - + Training finished successfully -[2023-09-20 16:02:40,834][training][INFO] - Saving training results -[2023-09-20 16:02:40,836][backend][INFO] - Cleaning pytorch backend -[2023-09-20 16:02:40,836][backend][INFO] - + Deleting pretrained model diff --git a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/3/hydra_config.yaml b/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/3/hydra_config.yaml deleted file mode 100644 index f53a28377945009d7294ad9edd5f274d64fe05f6..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/3/hydra_config.yaml +++ /dev/null @@ -1,75 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float32 - disable_grad: false - eval_mode: false - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: training - _target_: optimum_benchmark.benchmarks.training.benchmark.TrainingBenchmark - warmup_steps: 40 - dataset_shapes: - dataset_size: 2000 - sequence_length: 273 - num_choices: 1 - 
feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - training_arguments: - skip_memory_metrics: true - output_dir: ./trainer_output - use_cpu: false - ddp_find_unused_parameters: false - do_train: true - do_eval: false - do_predict: false - report_to: none - per_device_train_batch_size: 32 -experiment_name: bert_1gpu_training -model: bert-base-uncased -device: cuda -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/3/training_results.csv b/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/3/training_results.csv deleted file mode 100644 index a788559bc5ccfe0c6fd0d2a0199c711cda4b41ae..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/bert_1gpu_training/3/training_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -warmup.runtime(s),warmup.throughput(samples/s),training.runtime(s),training.throughput(samples/s),overall_training.runtime(s),overall_training.throughput(samples/s) -18.978710889816284,67.44399066044208,69.5253918170929,68.5792611215142,88.50410795211792,53.87320555312033 diff --git a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/0/.config/config.yaml b/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/0/.config/config.yaml deleted file mode 100644 index 31586216610ff3c8e9b7efe6d31ed5af08de26e0..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/0/.config/config.yaml +++ /dev/null @@ -1,73 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float16 - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 15 - warmup_runs: 10 - memory: false - energy: false - input_shapes: - batch_size: 1 - sequence_length: 200 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 - can_diffuse: ${can_diffuse:${task}} - can_generate: ${can_generate:${task}} - forward_kwargs: {} - generate_kwargs: {} -experiment_name: llama_1gpu_inference -model: fxmarty/tiny-llama-fast-tokenizer -device: cuda -task: ${infer_task:${model}} -hub_kwargs: - revision: main - cache_dir: null - 
force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/0/.config/hydra.yaml b/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/0/.config/hydra.yaml deleted file mode 100644 index 0671ffcf08db27631f3e5f722958e26ed028ff78..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,174 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,16 - backend.torch_dtype: float16,float32 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - - backend.torch_dtype=float16 - job: - name: experiment - chdir: true - override_dirname: backend.torch_dtype=float16,benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: llama2_1gpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/0/.config/overrides.yaml b/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/0/.config/overrides.yaml deleted file mode 100644 index b7fc5900f179157ac0449016dd8d9497ffd58db4..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/0/.config/overrides.yaml +++ /dev/null @@ -1,2 +0,0 @@ -- benchmark.input_shapes.batch_size=1 -- backend.torch_dtype=float16 diff --git a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/0/experiment.log b/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/0/experiment.log deleted file mode 100644 index 428f882ace7a3fe55bbfd4944fd45d0c68459229..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/0/experiment.log +++ /dev/null @@ -1,27 +0,0 @@ -[2023-09-20 16:03:25,603][inference][INFO] - `new_tokens` was 
set to 200. `max_new_tokens` and `min_new_tokens` will be set to 200. -[2023-09-20 16:03:25,754][experiment][WARNING] - Multiple GPUs detected but CUDA_DEVICE_ORDER is not set. This means that code might allocate resources from the wrong GPUs even if CUDA_VISIBLE_DEVICES is set. Pytorch uses the `FASTEST_FIRST` order by default, which is not guaranteed to be the same as nvidia-smi. `CUDA_DEVICE_ORDER` will be set to `PCI_BUS_ID` to ensure that the GPUs are allocated in the same order as nvidia-smi. -[2023-09-20 16:03:29,724][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type llama -[2023-09-20 16:03:29,725][backend][INFO] - Configuring pytorch backend -[2023-09-20 16:03:29,726][backend][INFO] - + Checking initial device(s) isolation of CUDA device(s): [0] -[2023-09-20 16:03:29,852][backend][INFO] - + Checking continuous device(s) isolation of CUDA device(s): [0] -[2023-09-20 16:03:29,870][pytorch][INFO] - + Disabling gradients -[2023-09-20 16:03:29,871][pytorch][INFO] - + Loading model on device: cuda -[2023-09-20 16:03:31,262][pytorch][INFO] - + Turning on model's eval mode -[2023-09-20 16:03:31,266][benchmark][INFO] - Configuring inference benchmark -[2023-09-20 16:03:31,266][inference][INFO] - Running inference benchmark -[2023-09-20 16:03:31,267][input_generator][INFO] - Using llama model type generator -[2023-09-20 16:03:31,287][inference][INFO] - + Preparing input for the forward pass -[2023-09-20 16:03:31,287][inference][INFO] - + Warming up the forward pass -[2023-09-20 16:03:31,607][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-20 16:03:31,607][latency_tracker][INFO] - Tracked Pytorch devices: [0] -[2023-09-20 16:03:46,929][inference][INFO] - + Forward pass latency: 3.24e-03 (s) -[2023-09-20 16:03:46,932][inference][INFO] - + Forward pass throughput: 309.00 (samples/s) -[2023-09-20 16:03:46,932][inference][INFO] - + Preparing input for the generation pass -[2023-09-20 16:03:46,932][inference][INFO] - + Warming up the generation pass -[2023-09-20 16:03:48,159][inference][INFO] - + Tracking generation latency and throughput -[2023-09-20 16:03:48,160][latency_tracker][INFO] - Tracked Pytorch devices: [0] -[2023-09-20 16:04:03,287][inference][INFO] - + Generation pass latency: 5.04e-01 (s) -[2023-09-20 16:04:03,287][inference][INFO] - + Generation pass throughput: 397.00 (tokens/s) -[2023-09-20 16:04:03,288][inference][INFO] - Saving inference results -[2023-09-20 16:04:03,296][backend][INFO] - Cleaning pytorch backend -[2023-09-20 16:04:03,296][backend][INFO] - + Deleting pretrained model diff --git a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/0/hydra_config.yaml b/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/0/hydra_config.yaml deleted file mode 100644 index 82eb70a083058b7ed98a1ae91483b49ea13d5d6d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/0/hydra_config.yaml +++ /dev/null @@ -1,79 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float16 - disable_grad: true - eval_mode: true - amp_autocast: false - amp_dtype: null - 
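The experiment.log above warns that CUDA_DEVICE_ORDER is unset and that the harness forces it to PCI_BUS_ID so device indices match nvidia-smi. A hedged sketch of applying the same guard before any CUDA initialization; the environment variable names are standard CUDA conventions, while the surrounding script (and the pytorch dependency) is illustrative only:

import os

# Must be set before the first CUDA context is created (i.e. before
# torch initializes CUDA), otherwise it has no effect on device ordering.
os.environ.setdefault("CUDA_DEVICE_ORDER", "PCI_BUS_ID")
# Optionally pin the visible device(s) as well; "0" here is illustrative.
os.environ.setdefault("CUDA_VISIBLE_DEVICES", "0")

import torch  # imported only after the environment is pinned

print(torch.cuda.device_count())  # now indexed in the same order as nvidia-smi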
torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 15 - warmup_runs: 10 - memory: false - energy: false - input_shapes: - batch_size: 1 - sequence_length: 200 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 - can_diffuse: false - can_generate: true - forward_kwargs: {} - generate_kwargs: - max_new_tokens: 200 - min_new_tokens: 200 - do_sample: false - use_cache: true - pad_token_id: 0 - num_beams: 1 -experiment_name: llama_1gpu_inference -model: fxmarty/tiny-llama-fast-tokenizer -device: cuda -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/0/inference_results.csv b/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/0/inference_results.csv deleted file mode 100644 index 17d5eda732f0252ff103407cf8743d45c7844980..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0.00324,309.0,0.504,397.0 diff --git a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/1/.config/config.yaml b/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/1/.config/config.yaml deleted file mode 100644 index 43b47967479b8340e846fdf11ba343f34adbfe5d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/1/.config/config.yaml +++ /dev/null @@ -1,73 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float32 - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 15 - warmup_runs: 10 - memory: false - energy: false - input_shapes: - batch_size: 1 - sequence_length: 200 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 - 
can_diffuse: ${can_diffuse:${task}} - can_generate: ${can_generate:${task}} - forward_kwargs: {} - generate_kwargs: {} -experiment_name: llama_1gpu_inference -model: fxmarty/tiny-llama-fast-tokenizer -device: cuda -task: ${infer_task:${model}} -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/1/.config/hydra.yaml b/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/1/.config/hydra.yaml deleted file mode 100644 index a3b8c8b18a57ab54a0e65d6bd4d9bd582b35c932..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,174 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,16 - backend.torch_dtype: float16,float32 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
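Note the difference between the .config/config.yaml files, which still contain unresolved OmegaConf interpolations such as ${pytorch_version:}, ${is_inference:${benchmark.name}} and ${infer_task:${model}}, and the hydra_config.yaml files, where those have resolved to 2.1.0+rocm5.6, true, and text-generation. Such interpolations rely on custom resolvers registered by the benchmark package; a minimal sketch of the mechanism with OmegaConf, where the resolver names come from the configs above but the implementations are hypothetical stand-ins:

from omegaconf import OmegaConf

# Hypothetical re-implementations; the real resolvers live in optimum_benchmark.
OmegaConf.register_new_resolver("pytorch_version", lambda: "2.1.0+rocm5.6")
OmegaConf.register_new_resolver("is_inference", lambda name: name == "inference")

cfg = OmegaConf.create({
    "benchmark": {"name": "inference"},
    "backend": {
        "version": "${pytorch_version:}",
        "disable_grad": "${is_inference:${benchmark.name}}",
    },
})
OmegaConf.resolve(cfg)  # materializes the interpolations in place
print(cfg.backend.version, cfg.backend.disable_grad)  # 2.1.0+rocm5.6 True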
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - - backend.torch_dtype=float32 - job: - name: experiment - chdir: true - override_dirname: backend.torch_dtype=float32,benchmark.input_shapes.batch_size=1 - id: '1' - num: 1 - config_name: llama2_1gpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/1/.config/overrides.yaml b/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/1/.config/overrides.yaml deleted file mode 100644 index 8b4741e9eb919dcf02db7f865f28d92490b262b0..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/1/.config/overrides.yaml +++ /dev/null @@ -1,2 +0,0 @@ -- benchmark.input_shapes.batch_size=1 -- backend.torch_dtype=float32 diff --git a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/1/experiment.log b/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/1/experiment.log deleted file mode 100644 index d7136754251f933e0f7bb7d026b00867a7c8e393..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/1/experiment.log +++ /dev/null @@ -1,41 +0,0 @@ -[2023-09-20 15:58:04,050][inference][INFO] - `new_tokens` was 
set to 200. `max_new_tokens` and `min_new_tokens` will be set to 200. -[2023-09-20 15:58:05,059][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type llama -[2023-09-20 15:58:05,059][backend][INFO] - Configuring pytorch backend -[2023-09-20 15:58:05,060][backend][INFO] - + Checking initial device(s) isolation of CUDA device(s): [0] -[2023-09-20 15:58:05,180][backend][INFO] - + Checking continuous device(s) isolation of CUDA device(s): [0] -[2023-09-20 15:58:05,193][pytorch][INFO] - + Disabling gradients -[2023-09-20 15:58:05,194][pytorch][INFO] - + Loading model on device: cuda -[2023-09-20 15:58:05,580][pytorch][INFO] - + Turning on model's eval mode -[2023-09-20 15:58:05,581][benchmark][INFO] - Configuring inference benchmark -[2023-09-20 15:58:05,581][inference][INFO] - Running inference benchmark -[2023-09-20 15:58:05,581][input_generator][INFO] - Using llama model type generator -[2023-09-20 15:58:05,582][inference][INFO] - + Preparing input for the forward pass -[2023-09-20 15:58:05,582][inference][INFO] - + Warming up the forward pass -[2023-09-20 15:58:05,708][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-20 15:58:05,708][latency_tracker][INFO] - Tracked Pytorch devices: [0] -[2023-09-20 16:04:03,933][inference][INFO] - `new_tokens` was set to 200. `max_new_tokens` and `min_new_tokens` will be set to 200. -[2023-09-20 16:04:04,897][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type llama -[2023-09-20 16:04:04,897][backend][INFO] - Configuring pytorch backend -[2023-09-20 16:04:04,897][backend][INFO] - + Checking initial device(s) isolation of CUDA device(s): [0] -[2023-09-20 16:04:05,020][backend][INFO] - + Checking continuous device(s) isolation of CUDA device(s): [0] -[2023-09-20 16:04:05,039][pytorch][INFO] - + Disabling gradients -[2023-09-20 16:04:05,040][pytorch][INFO] - + Loading model on device: cuda -[2023-09-20 16:04:05,417][pytorch][INFO] - + Turning on model's eval mode -[2023-09-20 16:04:05,417][benchmark][INFO] - Configuring inference benchmark -[2023-09-20 16:04:05,417][inference][INFO] - Running inference benchmark -[2023-09-20 16:04:05,418][input_generator][INFO] - Using llama model type generator -[2023-09-20 16:04:05,418][inference][INFO] - + Preparing input for the forward pass -[2023-09-20 16:04:05,418][inference][INFO] - + Warming up the forward pass -[2023-09-20 16:04:05,548][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-20 16:04:05,548][latency_tracker][INFO] - Tracked Pytorch devices: [0] -[2023-09-20 16:04:21,094][inference][INFO] - + Forward pass latency: 2.96e-03 (s) -[2023-09-20 16:04:21,097][inference][INFO] - + Forward pass throughput: 338.00 (samples/s) -[2023-09-20 16:04:21,097][inference][INFO] - + Preparing input for the generation pass -[2023-09-20 16:04:21,098][inference][INFO] - + Warming up the generation pass -[2023-09-20 16:04:21,706][inference][INFO] - + Tracking generation latency and throughput -[2023-09-20 16:04:21,706][latency_tracker][INFO] - Tracked Pytorch devices: [0] -[2023-09-20 16:04:37,057][inference][INFO] - + Generation pass latency: 6.14e-01 (s) -[2023-09-20 16:04:37,057][inference][INFO] - + Generation pass throughput: 326.00 (tokens/s) -[2023-09-20 16:04:37,057][inference][INFO] - Saving inference results -[2023-09-20 16:04:37,065][backend][INFO] - Cleaning pytorch backend -[2023-09-20 16:04:37,065][backend][INFO] - + Deleting pretrained model diff --git 
a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/1/hydra_config.yaml b/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/1/hydra_config.yaml deleted file mode 100644 index 74699b4dde858de23b60cd15505dc3745c7fa948..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/1/hydra_config.yaml +++ /dev/null @@ -1,79 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float32 - disable_grad: true - eval_mode: true - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 15 - warmup_runs: 10 - memory: false - energy: false - input_shapes: - batch_size: 1 - sequence_length: 200 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 - can_diffuse: false - can_generate: true - forward_kwargs: {} - generate_kwargs: - max_new_tokens: 200 - min_new_tokens: 200 - do_sample: false - use_cache: true - pad_token_id: 0 - num_beams: 1 -experiment_name: llama_1gpu_inference -model: fxmarty/tiny-llama-fast-tokenizer -device: cuda -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/1/inference_results.csv b/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/1/inference_results.csv deleted file mode 100644 index da41ed5c9a2310617a31ab8406537b8b8b6f0201..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0.00296,338.0,0.614,326.0 diff --git a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/2/.config/config.yaml b/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/2/.config/config.yaml deleted file mode 100644 index 4977442d8bcc23fcf97d0e094e48173757e090dd..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/2/.config/config.yaml +++ /dev/null @@ -1,73 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: 
optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float16 - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 15 - warmup_runs: 10 - memory: false - energy: false - input_shapes: - batch_size: 16 - sequence_length: 200 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 - can_diffuse: ${can_diffuse:${task}} - can_generate: ${can_generate:${task}} - forward_kwargs: {} - generate_kwargs: {} -experiment_name: llama_1gpu_inference -model: fxmarty/tiny-llama-fast-tokenizer -device: cuda -task: ${infer_task:${model}} -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/2/.config/hydra.yaml b/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/2/.config/hydra.yaml deleted file mode 100644 index 85d3997ee1a25a0dc4ccc1b57fda006dd918f182..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/2/.config/hydra.yaml +++ /dev/null @@ -1,174 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,16 - backend.torch_dtype: float16,float32 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. 
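Each llama run above is configured with duration: 15 and warmup_runs: 10, and the experiment logs show the latency tracker holding the device for roughly 15 seconds at a time. A simplified sketch of a duration-based latency benchmark of this shape, assuming a callable fn; this is an illustration of the pattern, not the tool's actual implementation:

import time

def benchmark_forward(fn, batch_size, warmup_runs=10, duration=15.0):
    """Warm up, then measure fn() repeatedly for `duration` seconds."""
    for _ in range(warmup_runs):
        fn()
    latencies = []
    start = time.perf_counter()
    while time.perf_counter() - start < duration:
        t0 = time.perf_counter()
        fn()
        latencies.append(time.perf_counter() - t0)
    mean_latency = sum(latencies) / len(latencies)
    throughput = batch_size / mean_latency  # samples/s, as in the CSVs
    return mean_latency, throughput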
- - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=16 - - backend.torch_dtype=float16 - job: - name: experiment - chdir: true - override_dirname: backend.torch_dtype=float16,benchmark.input_shapes.batch_size=16 - id: '2' - num: 2 - config_name: llama2_1gpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/2 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/2/.config/overrides.yaml b/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/2/.config/overrides.yaml deleted file mode 100644 index 67f6580c5dd56716f1c23e4e90698198799b499f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/2/.config/overrides.yaml +++ /dev/null @@ -1,2 +0,0 @@ -- benchmark.input_shapes.batch_size=16 -- backend.torch_dtype=float16 diff --git a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/2/experiment.log b/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/2/experiment.log deleted file mode 100644 index a93bf84c5bfc2497766421e853d55a7480df6c72..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/2/experiment.log +++ /dev/null @@ -1,26 +0,0 @@ -[2023-09-20 
16:04:37,714][inference][INFO] - `new_tokens` was set to 200. `max_new_tokens` and `min_new_tokens` will be set to 200. -[2023-09-20 16:04:38,688][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type llama -[2023-09-20 16:04:38,688][backend][INFO] - Configuring pytorch backend -[2023-09-20 16:04:38,688][backend][INFO] - + Checking initial device(s) isolation of CUDA device(s): [0] -[2023-09-20 16:04:38,811][backend][INFO] - + Checking continuous device(s) isolation of CUDA device(s): [0] -[2023-09-20 16:04:38,832][pytorch][INFO] - + Disabling gradients -[2023-09-20 16:04:38,833][pytorch][INFO] - + Loading model on device: cuda -[2023-09-20 16:04:39,199][pytorch][INFO] - + Turning on model's eval mode -[2023-09-20 16:04:39,200][benchmark][INFO] - Configuring inference benchmark -[2023-09-20 16:04:39,200][inference][INFO] - Running inference benchmark -[2023-09-20 16:04:39,200][input_generator][INFO] - Using llama model type generator -[2023-09-20 16:04:39,201][inference][INFO] - + Preparing input for the forward pass -[2023-09-20 16:04:39,201][inference][INFO] - + Warming up the forward pass -[2023-09-20 16:04:39,223][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-20 16:04:39,223][latency_tracker][INFO] - Tracked Pytorch devices: [0] -[2023-09-20 16:04:54,617][inference][INFO] - + Forward pass latency: 6.06e-03 (s) -[2023-09-20 16:04:54,618][inference][INFO] - + Forward pass throughput: 2640.00 (samples/s) -[2023-09-20 16:04:54,619][inference][INFO] - + Preparing input for the generation pass -[2023-09-20 16:04:54,619][inference][INFO] - + Warming up the generation pass -[2023-09-20 16:04:56,204][inference][INFO] - + Tracking generation latency and throughput -[2023-09-20 16:04:56,204][latency_tracker][INFO] - Tracked Pytorch devices: [0] -[2023-09-20 16:05:11,809][inference][INFO] - + Generation pass latency: 7.80e-01 (s) -[2023-09-20 16:05:11,809][inference][INFO] - + Generation pass throughput: 4100.00 (tokens/s) -[2023-09-20 16:05:11,810][inference][INFO] - Saving inference results -[2023-09-20 16:05:11,815][backend][INFO] - Cleaning pytorch backend -[2023-09-20 16:05:11,815][backend][INFO] - + Deleting pretrained model diff --git a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/2/hydra_config.yaml b/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/2/hydra_config.yaml deleted file mode 100644 index 46e7c942b1f98367f69538013206b4b487b0db82..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/2/hydra_config.yaml +++ /dev/null @@ -1,79 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float16 - disable_grad: true - eval_mode: true - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 15 - warmup_runs: 10 - memory: false - energy: false - 
input_shapes: - batch_size: 16 - sequence_length: 200 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 - can_diffuse: false - can_generate: true - forward_kwargs: {} - generate_kwargs: - max_new_tokens: 200 - min_new_tokens: 200 - do_sample: false - use_cache: true - pad_token_id: 0 - num_beams: 1 -experiment_name: llama_1gpu_inference -model: fxmarty/tiny-llama-fast-tokenizer -device: cuda -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/2/inference_results.csv b/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/2/inference_results.csv deleted file mode 100644 index 133f808b4d390a602a3a24130b868bce3d955682..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/2/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0.00606,2640.0,0.78,4100.0 diff --git a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/3/.config/config.yaml b/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/3/.config/config.yaml deleted file mode 100644 index 75a2cec740a09f8fed297594d9998531f5ae4881..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/3/.config/config.yaml +++ /dev/null @@ -1,73 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float32 - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 15 - warmup_runs: 10 - memory: false - energy: false - input_shapes: - batch_size: 16 - sequence_length: 200 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 - can_diffuse: ${can_diffuse:${task}} - can_generate: ${can_generate:${task}} - forward_kwargs: {} - generate_kwargs: {} -experiment_name: llama_1gpu_inference -model: fxmarty/tiny-llama-fast-tokenizer -device: cuda -task: ${infer_task:${model}} -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 
1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/3/.config/hydra.yaml b/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/3/.config/hydra.yaml deleted file mode 100644 index 59163fe6490549c225e6efb7aa787cf8b6937359..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/3/.config/hydra.yaml +++ /dev/null @@ -1,174 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,16 - backend.torch_dtype: float16,float32 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
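The reported throughputs are consistent with the reported latencies and input shapes: forward throughput is batch_size / latency, generation throughput is batch_size * new_tokens / latency, both rounded to three significant figures. Checking run 2 (batch 16, 200 new tokens) against its log and CSV above:

batch_size, new_tokens = 16, 200

forward_latency = 6.06e-03   # s, from run 2's log/CSV
generate_latency = 0.78      # s

print(batch_size / forward_latency)                # ~2640.3 -> 2640.00 samples/s
print(batch_size * new_tokens / generate_latency)  # ~4102.6 -> 4100.00 tokens/s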
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=16 - - backend.torch_dtype=float32 - job: - name: experiment - chdir: true - override_dirname: backend.torch_dtype=float32,benchmark.input_shapes.batch_size=16 - id: '3' - num: 3 - config_name: llama2_1gpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/3 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/3/.config/overrides.yaml b/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/3/.config/overrides.yaml deleted file mode 100644 index 9d69bb0ed87b5cb93e4c4e5d5b820de170746b79..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/3/.config/overrides.yaml +++ /dev/null @@ -1,2 +0,0 @@ -- benchmark.input_shapes.batch_size=16 -- backend.torch_dtype=float32 diff --git a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/3/experiment.log b/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/3/experiment.log deleted file mode 100644 index 009e7921d8067993f2e5fbacde2390bae029a416..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/3/experiment.log +++ /dev/null @@ -1,26 +0,0 @@ -[2023-09-20 16:05:12,458][inference][INFO] - `new_tokens` 
was set to 200. `max_new_tokens` and `min_new_tokens` will be set to 200. -[2023-09-20 16:05:13,533][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type llama -[2023-09-20 16:05:13,533][backend][INFO] - Configuring pytorch backend -[2023-09-20 16:05:13,533][backend][INFO] - + Checking initial device(s) isolation of CUDA device(s): [0] -[2023-09-20 16:05:13,655][backend][INFO] - + Checking continuous device(s) isolation of CUDA device(s): [0] -[2023-09-20 16:05:13,676][pytorch][INFO] - + Disabling gradients -[2023-09-20 16:05:13,677][pytorch][INFO] - + Loading model on device: cuda -[2023-09-20 16:05:14,057][pytorch][INFO] - + Turning on model's eval mode -[2023-09-20 16:05:14,057][benchmark][INFO] - Configuring inference benchmark -[2023-09-20 16:05:14,057][inference][INFO] - Running inference benchmark -[2023-09-20 16:05:14,058][input_generator][INFO] - Using llama model type generator -[2023-09-20 16:05:14,058][inference][INFO] - + Preparing input for the forward pass -[2023-09-20 16:05:14,059][inference][INFO] - + Warming up the forward pass -[2023-09-20 16:05:14,081][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-20 16:05:14,081][latency_tracker][INFO] - Tracked Pytorch devices: [0] -[2023-09-20 16:05:29,372][inference][INFO] - + Forward pass latency: 5.70e-03 (s) -[2023-09-20 16:05:29,374][inference][INFO] - + Forward pass throughput: 2810.00 (samples/s) -[2023-09-20 16:05:29,374][inference][INFO] - + Preparing input for the generation pass -[2023-09-20 16:05:29,374][inference][INFO] - + Warming up the generation pass -[2023-09-20 16:05:30,194][inference][INFO] - + Tracking generation latency and throughput -[2023-09-20 16:05:30,195][latency_tracker][INFO] - Tracked Pytorch devices: [0] -[2023-09-20 16:05:45,386][inference][INFO] - + Generation pass latency: 8.94e-01 (s) -[2023-09-20 16:05:45,386][inference][INFO] - + Generation pass throughput: 3580.00 (tokens/s) -[2023-09-20 16:05:45,386][inference][INFO] - Saving inference results -[2023-09-20 16:05:45,392][backend][INFO] - Cleaning pytorch backend -[2023-09-20 16:05:45,392][backend][INFO] - + Deleting pretrained model diff --git a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/3/hydra_config.yaml b/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/3/hydra_config.yaml deleted file mode 100644 index 2404fd246618d0489ff3ba123302854d0830756a..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/3/hydra_config.yaml +++ /dev/null @@ -1,79 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float32 - disable_grad: true - eval_mode: true - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 15 - warmup_runs: 10 - memory: false - energy: false - input_shapes: - batch_size: 16 - sequence_length: 
200 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 - can_diffuse: false - can_generate: true - forward_kwargs: {} - generate_kwargs: - max_new_tokens: 200 - min_new_tokens: 200 - do_sample: false - use_cache: true - pad_token_id: 0 - num_beams: 1 -experiment_name: llama_1gpu_inference -model: fxmarty/tiny-llama-fast-tokenizer -device: cuda -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/3/inference_results.csv b/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/3/inference_results.csv deleted file mode 100644 index cb158f7b416b68a458f0486ac40111263ef30401..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/llama_1gpu_inference/3/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0.0057,2810.0,0.894,3580.0 diff --git a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index b0c83089a2621f9950400d418a85b4376221df44..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,73 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 5 - warmup_runs: 10 - memory: true - energy: false - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: null - can_diffuse: ${can_diffuse:${task}} - can_generate: ${can_generate:${task}} - forward_kwargs: {} - generate_kwargs: {} -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - 
accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index e96c3809af95cb58b89f746826899759546dfdfb..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
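Every job directory above ends with a two-line inference_results.csv whose header names both metric and unit, e.g. forward.latency(s) and generate.throughput(tokens/s). A small sketch for aggregating those files across a sweep into one table; the glob pattern mirrors the raw_results layout in this diff, and pandas is an assumed dependency:

import glob
import pandas as pd

rows = []
for path in sorted(glob.glob(
        "raw_results/*/llama_1gpu_inference/*/inference_results.csv")):
    df = pd.read_csv(path)             # single data row per file
    df["job"] = path.split("/")[-2]    # sweep job number (0..3)
    rows.append(df)

results = pd.concat(rows, ignore_index=True)
print(results[["job", "forward.latency(s)", "generate.throughput(tokens/s)"]])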
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: experiment - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/pytorch_bert_inference/0/experiment.log b/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/pytorch_bert_inference/0/experiment.log deleted file mode 100644 index 6afa5d8ed85570bbee80a8355751d56d8678c327..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/pytorch_bert_inference/0/experiment.log +++ /dev/null @@ -1,18 +0,0 @@ -[2023-09-20 16:02:48,649][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and 
model_type bert -[2023-09-20 16:02:48,650][backend][INFO] - Configuring pytorch backend -[2023-09-20 16:02:48,652][pytorch][INFO] - + Disabling gradients -[2023-09-20 16:02:48,652][pytorch][INFO] - + Loading model on device: cpu -[2023-09-20 16:02:49,781][pytorch][INFO] - + Turning on model's eval mode -[2023-09-20 16:02:49,788][benchmark][INFO] - Configuring inference benchmark -[2023-09-20 16:02:49,788][inference][INFO] - Running inference benchmark -[2023-09-20 16:02:49,789][input_generator][INFO] - Using bert model type generator -[2023-09-20 16:02:49,789][inference][INFO] - + Preparing input for the forward pass -[2023-09-20 16:02:49,789][inference][INFO] - + Warming up the forward pass -[2023-09-20 16:02:50,029][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-20 16:02:55,068][inference][INFO] - + Forward pass latency: 3.28e-03 (s) -[2023-09-20 16:02:55,069][inference][INFO] - + Forward pass throughput: 305.00 (samples/s) -[2023-09-20 16:02:55,069][inference][INFO] - + Tracking forward pass peak memory -[2023-09-20 16:02:55,144][inference][INFO] - + Forward pass peak memory: 551 (MB) -[2023-09-20 16:02:55,145][inference][INFO] - Saving inference results -[2023-09-20 16:02:55,150][backend][INFO] - Cleaning pytorch backend -[2023-09-20 16:02:55,150][backend][INFO] - + Deleting pretrained model diff --git a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 8159f39871a74c9f18130d7533a9047d427de8e1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,73 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: null - disable_grad: true - eval_mode: true - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 5 - warmup_runs: 10 - memory: true - energy: false - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: null - can_diffuse: false - can_generate: false - forward_kwargs: {} - generate_kwargs: {} -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git 
a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 841a0d58750abaf20eb1f179cc745eac45d29b20..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -forward.latency(s),forward.throughput(samples/s),forward.peak_memory(MB) -0.00328,305.0,551 diff --git a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index cf6b2100f521049a9838a290716b3948d38db210..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,73 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 5 - warmup_runs: 10 - memory: true - energy: false - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: null - can_diffuse: ${can_diffuse:${task}} - can_generate: ${can_generate:${task}} - forward_kwargs: {} - generate_kwargs: {} -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index d48d43e0b2cbb8b58f26ceced3cf52a08d7fa4f7..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: experiment - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/pytorch_bert_inference/1/experiment.log b/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/pytorch_bert_inference/1/experiment.log deleted file mode 100644 index e96d4a0bf16e278fd2848f51e7c40cfbf5251ba2..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/pytorch_bert_inference/1/experiment.log +++ /dev/null @@ -1,18 +0,0 @@ -[2023-09-20 16:02:56,319][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-20 16:02:56,320][backend][INFO] - Configuring pytorch backend -[2023-09-20 16:02:56,321][pytorch][INFO] - + Disabling gradients -[2023-09-20 16:02:56,321][pytorch][INFO] - + Loading model on device: cpu -[2023-09-20 16:02:56,520][pytorch][INFO] - + Turning on model's eval mode -[2023-09-20 16:02:56,521][benchmark][INFO] - Configuring inference benchmark -[2023-09-20 16:02:56,521][inference][INFO] - Running inference benchmark -[2023-09-20 16:02:56,521][input_generator][INFO] - Using bert model type generator -[2023-09-20 16:02:56,521][inference][INFO] - + Preparing input for the forward pass -[2023-09-20 16:02:56,521][inference][INFO] - + Warming up the forward pass -[2023-09-20 16:02:56,559][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-20 16:03:01,595][inference][INFO] - + Forward pass latency: 3.59e-03 (s) -[2023-09-20 16:03:01,596][inference][INFO] - + Forward pass throughput: 1110.00 (samples/s) -[2023-09-20 16:03:01,596][inference][INFO] - + Tracking forward pass peak memory -[2023-09-20 16:03:01,642][inference][INFO] - + Forward pass peak memory: 553 (MB) -[2023-09-20 16:03:01,643][inference][INFO] - Saving inference results -[2023-09-20 16:03:01,646][backend][INFO] - Cleaning pytorch backend -[2023-09-20 16:03:01,646][backend][INFO] - + Deleting pretrained model diff --git a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 00d5f274372d051cb01e74f2a4b26b9ac2511e68..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,73 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: null - disable_grad: true - eval_mode: true - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - 
quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 5 - warmup_runs: 10 - memory: true - energy: false - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: null - can_diffuse: false - can_generate: false - forward_kwargs: {} - generate_kwargs: {} -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index 9c763caf7d5c0e814429a27d6fe5fe70fa8501e7..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -forward.latency(s),forward.throughput(samples/s),forward.peak_memory(MB) -0.00359,1110.0,553 diff --git a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index aa8ba5cebbb00543dea83d42e1a3242be877425e..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,73 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 5 - warmup_runs: 10 - memory: true - energy: false - input_shapes: - batch_size: 2 - sequence_length: 16 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: null - can_diffuse: ${can_diffuse:${task}} - can_generate: ${can_generate:${task}} - forward_kwargs: {} - generate_kwargs: {} -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 
-device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index 2186682f866f2e21b1b4c4e15efa7c2e24e0d592..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: experiment - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/pytorch_gpt2_inference/0/experiment.log b/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/pytorch_gpt2_inference/0/experiment.log deleted file mode 100644 index 32dd8ceeb9ecede468a94eeab641e1d88644c520..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/pytorch_gpt2_inference/0/experiment.log +++ /dev/null @@ -1,25 +0,0 @@ -[2023-09-20 16:03:09,012][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-20 16:03:09,013][backend][INFO] - Configuring pytorch backend -[2023-09-20 16:03:09,015][pytorch][INFO] 
- + Disabling gradients -[2023-09-20 16:03:09,015][pytorch][INFO] - + Loading model on device: cpu -[2023-09-20 16:03:10,483][pytorch][INFO] - + Turning on model's eval mode -[2023-09-20 16:03:10,487][benchmark][INFO] - Configuring inference benchmark -[2023-09-20 16:03:10,487][inference][INFO] - Running inference benchmark -[2023-09-20 16:03:10,488][input_generator][INFO] - Using gpt2 model type generator -[2023-09-20 16:03:10,488][inference][INFO] - + Preparing input for the forward pass -[2023-09-20 16:03:10,488][inference][INFO] - + Warming up the forward pass -[2023-09-20 16:03:10,674][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-20 16:03:15,713][inference][INFO] - + Forward pass latency: 3.39e-03 (s) -[2023-09-20 16:03:15,715][inference][INFO] - + Forward pass throughput: 590.00 (samples/s) -[2023-09-20 16:03:15,715][inference][INFO] - + Tracking forward pass peak memory -[2023-09-20 16:03:15,765][inference][INFO] - + Forward pass peak memory: 556 (MB) -[2023-09-20 16:03:15,765][inference][INFO] - + Preparing input for the generation pass -[2023-09-20 16:03:15,765][inference][INFO] - + Warming up the generation pass -[2023-09-20 16:03:16,061][inference][INFO] - + Tracking generation latency and throughput -[2023-09-20 16:03:21,146][inference][INFO] - + Generation pass latency: 2.99e-01 (s) -[2023-09-20 16:03:21,147][inference][INFO] - + Generation pass throughput: 669.00 (tokens/s) -[2023-09-20 16:03:21,147][inference][INFO] - + Tracking generation pass peak memory -[2023-09-20 16:03:21,793][inference][INFO] - + Generation pass peak memory: 561 (MB) -[2023-09-20 16:03:21,794][inference][INFO] - Saving inference results -[2023-09-20 16:03:21,800][backend][INFO] - Cleaning pytorch backend -[2023-09-20 16:03:21,800][backend][INFO] - + Deleting pretrained model diff --git a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index f46fa028eedfeac3a6b4c27f9b2b7fe54502d316..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,79 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: null - disable_grad: true - eval_mode: true - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 5 - warmup_runs: 10 - memory: true - energy: false - input_shapes: - batch_size: 2 - sequence_length: 16 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: null - can_diffuse: false - can_generate: true - forward_kwargs: {} - generate_kwargs: - max_new_tokens: 100 - min_new_tokens: 100 - do_sample: false - use_cache: true - pad_token_id: 0 - num_beams: 1 -experiment_name: pytorch_gpt2_inference -model: 
hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index d0f439a9a967fead9f945775ab93bc8d46f4eb1a..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:19:30_f94c9b3d863c1a95b44b5b3ea9ce3cbd27fc7609/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -forward.latency(s),forward.throughput(samples/s),forward.peak_memory(MB),generate.latency(s),generate.throughput(tokens/s),generate.peak_memory(MB) -0.00339,590.0,556,0.299,669.0,561 diff --git a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/0/.config/config.yaml b/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/0/.config/config.yaml deleted file mode 100644 index 833d05497b59ba26a18c6a302d0bb91fe2b458ce..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/0/.config/config.yaml +++ /dev/null @@ -1,75 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float16 - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: training - _target_: optimum_benchmark.benchmarks.training.benchmark.TrainingBenchmark - warmup_steps: 40 - dataset_shapes: - dataset_size: 2000 - sequence_length: 273 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - training_arguments: - skip_memory_metrics: true - output_dir: ./trainer_output - use_cpu: ${is_cpu:${device}} - ddp_find_unused_parameters: false - do_train: true - do_eval: false - do_predict: false - report_to: none - per_device_train_batch_size: 16 -experiment_name: bert_1gpu_training -model: bert-base-uncased -device: cuda -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct 
MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/0/.config/hydra.yaml b/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/0/.config/hydra.yaml deleted file mode 100644 index be44b594a49415a55ee9bc0d217b06fef969e76a..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/0/.config/hydra.yaml +++ /dev/null @@ -1,174 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - +benchmark.training_arguments.per_device_train_batch_size: 16,32 - backend.torch_dtype: float16,float32 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - +benchmark.training_arguments.per_device_train_batch_size=16 - - backend.torch_dtype=float16 - job: - name: experiment - chdir: true - override_dirname: +benchmark.training_arguments.per_device_train_batch_size=16,backend.torch_dtype=float16 - id: '0' - num: 0 - config_name: bert_1gpu_training - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/0 - choices: - benchmark: training - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/0/.config/overrides.yaml b/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/0/.config/overrides.yaml deleted file mode 100644 index 8d6d8f16f84a50b1a79c5e37697d3e62129d7306..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/0/.config/overrides.yaml +++ /dev/null @@ -1,2 +0,0 @@ -- +benchmark.training_arguments.per_device_train_batch_size=16 -- backend.torch_dtype=float16 diff --git a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/0/experiment.log b/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/0/experiment.log deleted file mode 100644 index 01023cafe3809c8714334c64d613d5ce0ff8c67f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/0/experiment.log +++ /dev/null @@ -1,17 +0,0 @@ 
-[2023-09-20 16:06:16,054][experiment][WARNING] - Multiple GPUs detected but CUDA_DEVICE_ORDER is not set. This means that code might allocate resources from the wrong GPUs even if CUDA_VISIBLE_DEVICES is set. Pytorch uses the `FASTEST_FIRST` order by default, which is not guaranteed to be the same as nvidia-smi. `CUDA_DEVICE_ORDER` will be set to `PCI_BUS_ID` to ensure that the GPUs are allocated in the same order as nvidia-smi. -[2023-09-20 16:06:20,259][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-20 16:06:20,259][backend][INFO] - Configuring pytorch backend -[2023-09-20 16:06:20,261][backend][INFO] - + Checking initial device(s) isolation of CUDA device(s): [0] -[2023-09-20 16:06:20,389][backend][INFO] - + Checking continuous device(s) isolation of CUDA device(s): [0] -[2023-09-20 16:06:20,403][pytorch][INFO] - + Loading model on device: cuda -[2023-09-20 16:06:29,171][benchmark][INFO] - Configuring training benchmark -[2023-09-20 16:06:29,171][training][INFO] - Running training benchmark -[2023-09-20 16:06:29,171][dataset_generator][INFO] - Using text-classification task generator -[2023-09-20 16:06:29,231][pytorch][INFO] - + Setting dataset format to `torch`. -[2023-09-20 16:06:29,231][pytorch][INFO] - + Wrapping training arguments with transformers.TrainingArguments -[2023-09-20 16:06:29,233][pytorch][INFO] - + Wrapping model with transformers.Trainer -[2023-09-20 16:06:29,237][pytorch][INFO] - + Starting training -[2023-09-20 16:07:04,493][pytorch][INFO] - + Training finished successfully -[2023-09-20 16:07:04,494][training][INFO] - Saving training results -[2023-09-20 16:07:04,497][backend][INFO] - Cleaning pytorch backend -[2023-09-20 16:07:04,497][backend][INFO] - + Deleting pretrained model diff --git a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/0/hydra_config.yaml b/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/0/hydra_config.yaml deleted file mode 100644 index a75f0da9b0aa0151767b6e6858f2f15bc4de54c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/0/hydra_config.yaml +++ /dev/null @@ -1,75 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float16 - disable_grad: false - eval_mode: false - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: training - _target_: optimum_benchmark.benchmarks.training.benchmark.TrainingBenchmark - warmup_steps: 40 - dataset_shapes: - dataset_size: 2000 - sequence_length: 273 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - training_arguments: - skip_memory_metrics: true - output_dir: ./trainer_output - use_cpu: false - ddp_find_unused_parameters: false - do_train: true - do_eval: false - do_predict: false - report_to: none - per_device_train_batch_size: 16 -experiment_name: bert_1gpu_training -model: bert-base-uncased -device: cuda -task: 
text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/0/training_results.csv b/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/0/training_results.csv deleted file mode 100644 index 0d3b4eb54ed2831d39ea25d75a2377835036e9a1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/0/training_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -warmup.runtime(s),warmup.throughput(samples/s),training.runtime(s),training.throughput(samples/s),overall_training.runtime(s),overall_training.throughput(samples/s) -4.295328855514526,148.99906887882187,30.848360061645508,173.75315865377928,35.14369058609009,152.51670813769042 diff --git a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/1/.config/config.yaml b/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/1/.config/config.yaml deleted file mode 100644 index 8d5c88f6241de713f22a4fbaee0c5bb7f23f5dfa..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/1/.config/config.yaml +++ /dev/null @@ -1,75 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float32 - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: training - _target_: optimum_benchmark.benchmarks.training.benchmark.TrainingBenchmark - warmup_steps: 40 - dataset_shapes: - dataset_size: 2000 - sequence_length: 273 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - training_arguments: - skip_memory_metrics: true - output_dir: ./trainer_output - use_cpu: ${is_cpu:${device}} - ddp_find_unused_parameters: false - do_train: true - do_eval: false - do_predict: false - report_to: none - per_device_train_batch_size: 16 -experiment_name: bert_1gpu_training -model: bert-base-uncased -device: cuda -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - 
Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/1/.config/hydra.yaml b/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/1/.config/hydra.yaml deleted file mode 100644 index a9fd935a5138b84f49dbe7bf39c08ff612d56740..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/1/.config/hydra.yaml +++ /dev/null @@ -1,174 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - +benchmark.training_arguments.per_device_train_batch_size: 16,32 - backend.torch_dtype: float16,float32 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - +benchmark.training_arguments.per_device_train_batch_size=16 - - backend.torch_dtype=float32 - job: - name: experiment - chdir: true - override_dirname: +benchmark.training_arguments.per_device_train_batch_size=16,backend.torch_dtype=float32 - id: '1' - num: 1 - config_name: bert_1gpu_training - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/1 - choices: - benchmark: training - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/1/.config/overrides.yaml b/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/1/.config/overrides.yaml deleted file mode 100644 index ec1fe39ea16e445c5d7092a8b3e071d9b0c55522..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/1/.config/overrides.yaml +++ /dev/null @@ -1,2 +0,0 @@ -- +benchmark.training_arguments.per_device_train_batch_size=16 -- backend.torch_dtype=float32 diff --git a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/1/experiment.log b/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/1/experiment.log deleted file mode 100644 index 5073142176aefaccc1933ab0ece38bd40dfb1b64..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/1/experiment.log +++ /dev/null @@ -1,16 +0,0 @@ 
-[2023-09-20 16:07:06,151][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-20 16:07:06,151][backend][INFO] - Configuring pytorch backend -[2023-09-20 16:07:06,151][backend][INFO] - + Checking initial device(s) isolation of CUDA device(s): [0] -[2023-09-20 16:07:06,274][backend][INFO] - + Checking continuous device(s) isolation of CUDA device(s): [0] -[2023-09-20 16:07:06,292][pytorch][INFO] - + Loading model on device: cuda -[2023-09-20 16:07:06,872][benchmark][INFO] - Configuring training benchmark -[2023-09-20 16:07:06,872][training][INFO] - Running training benchmark -[2023-09-20 16:07:06,873][dataset_generator][INFO] - Using text-classification task generator -[2023-09-20 16:07:06,912][pytorch][INFO] - + Setting dataset format to `torch`. -[2023-09-20 16:07:06,912][pytorch][INFO] - + Wrapping training arguments with transformers.TrainingArguments -[2023-09-20 16:07:06,913][pytorch][INFO] - + Wrapping model with transformers.Trainer -[2023-09-20 16:07:06,918][pytorch][INFO] - + Starting training -[2023-09-20 16:08:12,835][pytorch][INFO] - + Training finished successfully -[2023-09-20 16:08:12,835][training][INFO] - Saving training results -[2023-09-20 16:08:12,837][backend][INFO] - Cleaning pytorch backend -[2023-09-20 16:08:12,837][backend][INFO] - + Deleting pretrained model diff --git a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/1/hydra_config.yaml b/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/1/hydra_config.yaml deleted file mode 100644 index abcce5eda3bfbb21561ba578be643fe874f4ee2c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/1/hydra_config.yaml +++ /dev/null @@ -1,75 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float32 - disable_grad: false - eval_mode: false - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: training - _target_: optimum_benchmark.benchmarks.training.benchmark.TrainingBenchmark - warmup_steps: 40 - dataset_shapes: - dataset_size: 2000 - sequence_length: 273 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - training_arguments: - skip_memory_metrics: true - output_dir: ./trainer_output - use_cpu: false - ddp_find_unused_parameters: false - do_train: true - do_eval: false - do_predict: false - report_to: none - per_device_train_batch_size: 16 -experiment_name: bert_1gpu_training -model: bert-base-uncased -device: cuda -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct 
MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/1/training_results.csv b/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/1/training_results.csv deleted file mode 100644 index 56b52efc9d046f2ad46ad0965a4265cfa341d41e..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/1/training_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -warmup.runtime(s),warmup.throughput(samples/s),training.runtime(s),training.throughput(samples/s),overall_training.runtime(s),overall_training.throughput(samples/s) -7.073848247528076,90.47409240417973,58.7314658164978,91.26283373799882,65.80531525611877,81.4523869255624 diff --git a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/2/.config/config.yaml b/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/2/.config/config.yaml deleted file mode 100644 index e7ef28c956bde8dedcb111288d0157eca31b1c62..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/2/.config/config.yaml +++ /dev/null @@ -1,75 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float16 - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: training - _target_: optimum_benchmark.benchmarks.training.benchmark.TrainingBenchmark - warmup_steps: 40 - dataset_shapes: - dataset_size: 2000 - sequence_length: 273 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - training_arguments: - skip_memory_metrics: true - output_dir: ./trainer_output - use_cpu: ${is_cpu:${device}} - ddp_find_unused_parameters: false - do_train: true - do_eval: false - do_predict: false - report_to: none - per_device_train_batch_size: 32 -experiment_name: bert_1gpu_training -model: bert-base-uncased -device: cuda -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/2/.config/hydra.yaml b/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/2/.config/hydra.yaml deleted file mode 100644 index 
f39aaf9cf717b288383e6ea02e41de350f60364e..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/2/.config/hydra.yaml +++ /dev/null @@ -1,174 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - +benchmark.training_arguments.per_device_train_batch_size: 16,32 - backend.torch_dtype: float16,float32 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - +benchmark.training_arguments.per_device_train_batch_size=32 - - backend.torch_dtype=float16 - job: - name: experiment - chdir: true - override_dirname: +benchmark.training_arguments.per_device_train_batch_size=32,backend.torch_dtype=float16 - id: '2' - num: 2 - config_name: bert_1gpu_training - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: 
command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/2 - choices: - benchmark: training - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/2/.config/overrides.yaml b/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/2/.config/overrides.yaml deleted file mode 100644 index d5e7aac495a5978edf9851ed4029a4f4f4de707b..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/2/.config/overrides.yaml +++ /dev/null @@ -1,2 +0,0 @@ -- +benchmark.training_arguments.per_device_train_batch_size=32 -- backend.torch_dtype=float16 diff --git a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/2/experiment.log b/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/2/experiment.log deleted file mode 100644 index c7c67f38a61839faa6b21b9255ca24006d2ca097..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/2/experiment.log +++ /dev/null @@ -1,16 +0,0 @@ -[2023-09-20 16:08:14,597][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-20 16:08:14,597][backend][INFO] - Configuring pytorch backend -[2023-09-20 16:08:14,598][backend][INFO] - + Checking initial device(s) isolation of CUDA device(s): [0] -[2023-09-20 16:08:14,718][backend][INFO] - + Checking continuous device(s) isolation of CUDA device(s): [0] -[2023-09-20 16:08:14,736][pytorch][INFO] - + Loading model on device: cuda -[2023-09-20 16:08:15,336][benchmark][INFO] - Configuring training benchmark -[2023-09-20 16:08:15,336][training][INFO] - Running training benchmark -[2023-09-20 16:08:15,336][dataset_generator][INFO] - Using text-classification task generator -[2023-09-20 16:08:15,375][pytorch][INFO] - + Setting dataset format to `torch`. 
-[2023-09-20 16:08:15,376][pytorch][INFO] - + Wrapping training arguments with transformers.TrainingArguments -[2023-09-20 16:08:15,376][pytorch][INFO] - + Wrapping model with transformers.Trainer -[2023-09-20 16:08:15,381][pytorch][INFO] - + Starting training -[2023-09-20 16:08:45,592][pytorch][INFO] - + Training finished successfully -[2023-09-20 16:08:45,593][training][INFO] - Saving training results -[2023-09-20 16:08:45,595][backend][INFO] - Cleaning pytorch backend -[2023-09-20 16:08:45,595][backend][INFO] - + Deleting pretrained model diff --git a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/2/hydra_config.yaml b/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/2/hydra_config.yaml deleted file mode 100644 index af5e494ca3514c36d5e090f1af921bd6e849a818..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/2/hydra_config.yaml +++ /dev/null @@ -1,75 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float16 - disable_grad: false - eval_mode: false - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: training - _target_: optimum_benchmark.benchmarks.training.benchmark.TrainingBenchmark - warmup_steps: 40 - dataset_shapes: - dataset_size: 2000 - sequence_length: 273 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - training_arguments: - skip_memory_metrics: true - output_dir: ./trainer_output - use_cpu: false - ddp_find_unused_parameters: false - do_train: true - do_eval: false - do_predict: false - report_to: none - per_device_train_batch_size: 32 -experiment_name: bert_1gpu_training -model: bert-base-uncased -device: cuda -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/2/training_results.csv b/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/2/training_results.csv deleted file mode 100644 index 3bad2c6590bbbc75b8cdfe988d51c58cee11378d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/2/training_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -warmup.runtime(s),warmup.throughput(samples/s),training.runtime(s),training.throughput(samples/s),overall_training.runtime(s),overall_training.throughput(samples/s) 
-6.427045106887817,199.15839685460327,23.675896406173706,201.38625031138,30.102943181991577,158.38982823620884 diff --git a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/3/.config/config.yaml b/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/3/.config/config.yaml deleted file mode 100644 index bf1190e8785c39d372b5122d20df36f210ecf85f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/3/.config/config.yaml +++ /dev/null @@ -1,75 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float32 - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: training - _target_: optimum_benchmark.benchmarks.training.benchmark.TrainingBenchmark - warmup_steps: 40 - dataset_shapes: - dataset_size: 2000 - sequence_length: 273 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - training_arguments: - skip_memory_metrics: true - output_dir: ./trainer_output - use_cpu: ${is_cpu:${device}} - ddp_find_unused_parameters: false - do_train: true - do_eval: false - do_predict: false - report_to: none - per_device_train_batch_size: 32 -experiment_name: bert_1gpu_training -model: bert-base-uncased -device: cuda -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/3/.config/hydra.yaml b/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/3/.config/hydra.yaml deleted file mode 100644 index daa95bb1ca3b7b1e51860bd113e96fe93ce59dc6..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/3/.config/hydra.yaml +++ /dev/null @@ -1,174 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - +benchmark.training_arguments.per_device_train_batch_size: 16,32 - backend.torch_dtype: float16,float32 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by 
Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - +benchmark.training_arguments.per_device_train_batch_size=32 - - backend.torch_dtype=float32 - job: - name: experiment - chdir: true - override_dirname: +benchmark.training_arguments.per_device_train_batch_size=32,backend.torch_dtype=float32 - id: '3' - num: 3 - config_name: bert_1gpu_training - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/3 - choices: - benchmark: training - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/3/.config/overrides.yaml b/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/3/.config/overrides.yaml deleted file mode 100644 index 
f92a32ab90ab68f9e88427057fcb5ef3cbec936b..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/3/.config/overrides.yaml +++ /dev/null @@ -1,2 +0,0 @@ -- +benchmark.training_arguments.per_device_train_batch_size=32 -- backend.torch_dtype=float32 diff --git a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/3/experiment.log b/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/3/experiment.log deleted file mode 100644 index 7fc4ddf7469529382a9e55163eee4cec3a98e74f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/3/experiment.log +++ /dev/null @@ -1,16 +0,0 @@ -[2023-09-20 16:08:47,157][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-20 16:08:47,157][backend][INFO] - Configuring pytorch backend -[2023-09-20 16:08:47,157][backend][INFO] - + Checking initial device(s) isolation of CUDA device(s): [0] -[2023-09-20 16:08:47,280][backend][INFO] - + Checking continuous device(s) isolation of CUDA device(s): [0] -[2023-09-20 16:08:47,301][pytorch][INFO] - + Loading model on device: cuda -[2023-09-20 16:08:47,812][benchmark][INFO] - Configuring training benchmark -[2023-09-20 16:08:47,812][training][INFO] - Running training benchmark -[2023-09-20 16:08:47,812][dataset_generator][INFO] - Using text-classification task generator -[2023-09-20 16:08:47,851][pytorch][INFO] - + Setting dataset format to `torch`. -[2023-09-20 16:08:47,851][pytorch][INFO] - + Wrapping training arguments with transformers.TrainingArguments -[2023-09-20 16:08:47,852][pytorch][INFO] - + Wrapping model with transformers.Trainer -[2023-09-20 16:08:47,857][pytorch][INFO] - + Starting training -[2023-09-20 16:09:51,005][pytorch][INFO] - + Training finished successfully -[2023-09-20 16:09:51,006][training][INFO] - Saving training results -[2023-09-20 16:09:51,008][backend][INFO] - Cleaning pytorch backend -[2023-09-20 16:09:51,008][backend][INFO] - + Deleting pretrained model diff --git a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/3/hydra_config.yaml b/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/3/hydra_config.yaml deleted file mode 100644 index f53a28377945009d7294ad9edd5f274d64fe05f6..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/3/hydra_config.yaml +++ /dev/null @@ -1,75 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float32 - disable_grad: false - eval_mode: false - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: training - _target_: optimum_benchmark.benchmarks.training.benchmark.TrainingBenchmark - warmup_steps: 40 - dataset_shapes: - dataset_size: 2000 - sequence_length: 273 - num_choices: 1 - 
feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - training_arguments: - skip_memory_metrics: true - output_dir: ./trainer_output - use_cpu: false - ddp_find_unused_parameters: false - do_train: true - do_eval: false - do_predict: false - report_to: none - per_device_train_batch_size: 32 -experiment_name: bert_1gpu_training -model: bert-base-uncased -device: cuda -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/3/training_results.csv b/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/3/training_results.csv deleted file mode 100644 index fef063fbf019921ebacdb92e35b63a3c6165b760..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/bert_1gpu_training/3/training_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -warmup.runtime(s),warmup.throughput(samples/s),training.runtime(s),training.throughput(samples/s),overall_training.runtime(s),overall_training.throughput(samples/s) -12.58586072921753,101.70142730314309,50.450583934783936,94.50832137371216,63.03644561767578,75.63878250557683 diff --git a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/0/.config/config.yaml b/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/0/.config/config.yaml deleted file mode 100644 index 31586216610ff3c8e9b7efe6d31ed5af08de26e0..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/0/.config/config.yaml +++ /dev/null @@ -1,73 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float16 - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 15 - warmup_runs: 10 - memory: false - energy: false - input_shapes: - batch_size: 1 - sequence_length: 200 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 - can_diffuse: ${can_diffuse:${task}} - can_generate: ${can_generate:${task}} - forward_kwargs: {} - generate_kwargs: {} -experiment_name: llama_1gpu_inference -model: fxmarty/tiny-llama-fast-tokenizer -device: cuda -task: ${infer_task:${model}} -hub_kwargs: - revision: main - cache_dir: null - 
force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/0/.config/hydra.yaml b/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/0/.config/hydra.yaml deleted file mode 100644 index 3014a4003f901f525987c2cdfe0eba74749f82a5..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,174 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,16 - backend.torch_dtype: float16,float32 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - - backend.torch_dtype=float16 - job: - name: experiment - chdir: true - override_dirname: backend.torch_dtype=float16,benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: llama2_1gpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/0/.config/overrides.yaml b/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/0/.config/overrides.yaml deleted file mode 100644 index b7fc5900f179157ac0449016dd8d9497ffd58db4..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/0/.config/overrides.yaml +++ /dev/null @@ -1,2 +0,0 @@ -- benchmark.input_shapes.batch_size=1 -- backend.torch_dtype=float16 diff --git a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/0/experiment.log b/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/0/experiment.log deleted file mode 100644 index 07f2914168affb50b87b988aa51e88b167d6a720..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/0/experiment.log +++ /dev/null @@ -1,27 +0,0 @@ -[2023-09-20 16:10:36,180][inference][INFO] - `new_tokens` was 
set to 200. `max_new_tokens` and `min_new_tokens` will be set to 200. -[2023-09-20 16:10:36,329][experiment][WARNING] - Multiple GPUs detected but CUDA_DEVICE_ORDER is not set. This means that code might allocate resources from the wrong GPUs even if CUDA_VISIBLE_DEVICES is set. Pytorch uses the `FASTEST_FIRST` order by default, which is not guaranteed to be the same as nvidia-smi. `CUDA_DEVICE_ORDER` will be set to `PCI_BUS_ID` to ensure that the GPUs are allocated in the same order as nvidia-smi. -[2023-09-20 16:10:40,577][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type llama -[2023-09-20 16:10:40,577][backend][INFO] - Configuring pytorch backend -[2023-09-20 16:10:40,578][backend][INFO] - + Checking initial device(s) isolation of CUDA device(s): [0] -[2023-09-20 16:10:40,707][backend][INFO] - + Checking continuous device(s) isolation of CUDA device(s): [0] -[2023-09-20 16:10:40,722][pytorch][INFO] - + Disabling gradients -[2023-09-20 16:10:40,723][pytorch][INFO] - + Loading model on device: cuda -[2023-09-20 16:10:42,219][pytorch][INFO] - + Turning on model's eval mode -[2023-09-20 16:10:42,223][benchmark][INFO] - Configuring inference benchmark -[2023-09-20 16:10:42,223][inference][INFO] - Running inference benchmark -[2023-09-20 16:10:42,223][input_generator][INFO] - Using llama model type generator -[2023-09-20 16:10:42,243][inference][INFO] - + Preparing input for the forward pass -[2023-09-20 16:10:42,244][inference][INFO] - + Warming up the forward pass -[2023-09-20 16:10:42,562][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-20 16:10:42,563][latency_tracker][INFO] - Tracked Pytorch devices: [0] -[2023-09-20 16:10:58,199][inference][INFO] - + Forward pass latency: 3.19e-03 (s) -[2023-09-20 16:10:58,202][inference][INFO] - + Forward pass throughput: 313.00 (samples/s) -[2023-09-20 16:10:58,202][inference][INFO] - + Preparing input for the generation pass -[2023-09-20 16:10:58,202][inference][INFO] - + Warming up the generation pass -[2023-09-20 16:10:59,321][inference][INFO] - + Tracking generation latency and throughput -[2023-09-20 16:10:59,322][latency_tracker][INFO] - Tracked Pytorch devices: [0] -[2023-09-20 16:11:14,860][inference][INFO] - + Generation pass latency: 5.18e-01 (s) -[2023-09-20 16:11:14,860][inference][INFO] - + Generation pass throughput: 386.00 (tokens/s) -[2023-09-20 16:11:14,860][inference][INFO] - Saving inference results -[2023-09-20 16:11:14,869][backend][INFO] - Cleaning pytorch backend -[2023-09-20 16:11:14,869][backend][INFO] - + Deleting pretrained model diff --git a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/0/hydra_config.yaml b/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/0/hydra_config.yaml deleted file mode 100644 index 82eb70a083058b7ed98a1ae91483b49ea13d5d6d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/0/hydra_config.yaml +++ /dev/null @@ -1,79 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float16 - disable_grad: true - eval_mode: true - amp_autocast: false - amp_dtype: null - 
torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 15 - warmup_runs: 10 - memory: false - energy: false - input_shapes: - batch_size: 1 - sequence_length: 200 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 - can_diffuse: false - can_generate: true - forward_kwargs: {} - generate_kwargs: - max_new_tokens: 200 - min_new_tokens: 200 - do_sample: false - use_cache: true - pad_token_id: 0 - num_beams: 1 -experiment_name: llama_1gpu_inference -model: fxmarty/tiny-llama-fast-tokenizer -device: cuda -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/0/inference_results.csv b/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/0/inference_results.csv deleted file mode 100644 index 9eb275caf247a5ba3416bc25ce2a3766de2160c6..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0.00319,313.0,0.518,386.0 diff --git a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/1/.config/config.yaml b/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/1/.config/config.yaml deleted file mode 100644 index 43b47967479b8340e846fdf11ba343f34adbfe5d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/1/.config/config.yaml +++ /dev/null @@ -1,73 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float32 - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 15 - warmup_runs: 10 - memory: false - energy: false - input_shapes: - batch_size: 1 - sequence_length: 200 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 - 
can_diffuse: ${can_diffuse:${task}} - can_generate: ${can_generate:${task}} - forward_kwargs: {} - generate_kwargs: {} -experiment_name: llama_1gpu_inference -model: fxmarty/tiny-llama-fast-tokenizer -device: cuda -task: ${infer_task:${model}} -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/1/.config/hydra.yaml b/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/1/.config/hydra.yaml deleted file mode 100644 index 8e77af0d2e4e49189c6797ce0c9ccd5adeb8cb89..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,174 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,16 - backend.torch_dtype: float16,float32 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
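The sweeper params recorded in these hydra.yaml files (benchmark.input_shapes.batch_size: 1,16 crossed with backend.torch_dtype: float16,float32) are what expand into the four llama_1gpu_inference job directories 0-3 whose artifacts this diff deletes; the bert_1gpu_training sweep above follows the same 2x2 pattern with +benchmark.training_arguments.per_device_train_batch_size: 16,32. A minimal sketch of how Hydra's BasicSweeper enumerates that grid, with the job numbering confirmed by the deleted overrides.yaml files:

    from itertools import product

    # Grid recorded under hydra.sweeper.params in the deleted hydra.yaml files.
    batch_sizes = [1, 16]            # benchmark.input_shapes.batch_size
    dtypes = ["float16", "float32"]  # backend.torch_dtype

    for job_num, (bs, dtype) in enumerate(product(batch_sizes, dtypes)):
        # matches the deleted overrides.yaml under llama_1gpu_inference/{0,1,2,3}
        print(f"{job_num}: benchmark.input_shapes.batch_size={bs} backend.torch_dtype={dtype}")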
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - - backend.torch_dtype=float32 - job: - name: experiment - chdir: true - override_dirname: backend.torch_dtype=float32,benchmark.input_shapes.batch_size=1 - id: '1' - num: 1 - config_name: llama2_1gpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/1/.config/overrides.yaml b/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/1/.config/overrides.yaml deleted file mode 100644 index 8b4741e9eb919dcf02db7f865f28d92490b262b0..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/1/.config/overrides.yaml +++ /dev/null @@ -1,2 +0,0 @@ -- benchmark.input_shapes.batch_size=1 -- backend.torch_dtype=float32 diff --git a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/1/experiment.log b/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/1/experiment.log deleted file mode 100644 index 3c0a355c49c59347e5e2f234de53f7ffe0edad7c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/1/experiment.log +++ /dev/null @@ -1,26 +0,0 @@ -[2023-09-20 16:11:15,660][inference][INFO] - `new_tokens` was 
set to 200. `max_new_tokens` and `min_new_tokens` will be set to 200. -[2023-09-20 16:11:16,629][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type llama -[2023-09-20 16:11:16,630][backend][INFO] - Configuring pytorch backend -[2023-09-20 16:11:16,630][backend][INFO] - + Checking initial device(s) isolation of CUDA device(s): [0] -[2023-09-20 16:11:16,747][backend][INFO] - + Checking continuous device(s) isolation of CUDA device(s): [0] -[2023-09-20 16:11:16,763][pytorch][INFO] - + Disabling gradients -[2023-09-20 16:11:16,764][pytorch][INFO] - + Loading model on device: cuda -[2023-09-20 16:11:17,135][pytorch][INFO] - + Turning on model's eval mode -[2023-09-20 16:11:17,136][benchmark][INFO] - Configuring inference benchmark -[2023-09-20 16:11:17,136][inference][INFO] - Running inference benchmark -[2023-09-20 16:11:17,136][input_generator][INFO] - Using llama model type generator -[2023-09-20 16:11:17,137][inference][INFO] - + Preparing input for the forward pass -[2023-09-20 16:11:17,137][inference][INFO] - + Warming up the forward pass -[2023-09-20 16:11:17,267][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-20 16:11:17,267][latency_tracker][INFO] - Tracked Pytorch devices: [0] -[2023-09-20 16:11:32,768][inference][INFO] - + Forward pass latency: 2.61e-03 (s) -[2023-09-20 16:11:32,771][inference][INFO] - + Forward pass throughput: 383.00 (samples/s) -[2023-09-20 16:11:32,771][inference][INFO] - + Preparing input for the generation pass -[2023-09-20 16:11:32,772][inference][INFO] - + Warming up the generation pass -[2023-09-20 16:11:33,173][inference][INFO] - + Tracking generation latency and throughput -[2023-09-20 16:11:33,173][latency_tracker][INFO] - Tracked Pytorch devices: [0] -[2023-09-20 16:11:48,440][inference][INFO] - + Generation pass latency: 5.45e-01 (s) -[2023-09-20 16:11:48,440][inference][INFO] - + Generation pass throughput: 367.00 (tokens/s) -[2023-09-20 16:11:48,440][inference][INFO] - Saving inference results -[2023-09-20 16:11:48,449][backend][INFO] - Cleaning pytorch backend -[2023-09-20 16:11:48,449][backend][INFO] - + Deleting pretrained model diff --git a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/1/hydra_config.yaml b/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/1/hydra_config.yaml deleted file mode 100644 index 74699b4dde858de23b60cd15505dc3745c7fa948..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/1/hydra_config.yaml +++ /dev/null @@ -1,79 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float32 - disable_grad: true - eval_mode: true - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 15 - warmup_runs: 10 - memory: false - energy: false - input_shapes: - batch_size: 1 - sequence_length: 200 - 
num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 - can_diffuse: false - can_generate: true - forward_kwargs: {} - generate_kwargs: - max_new_tokens: 200 - min_new_tokens: 200 - do_sample: false - use_cache: true - pad_token_id: 0 - num_beams: 1 -experiment_name: llama_1gpu_inference -model: fxmarty/tiny-llama-fast-tokenizer -device: cuda -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/1/inference_results.csv b/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/1/inference_results.csv deleted file mode 100644 index 1d22185d474e029aa2a9435fb896b67f70e0c34e..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0.00261,383.0,0.545,367.0 diff --git a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/2/.config/config.yaml b/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/2/.config/config.yaml deleted file mode 100644 index 4977442d8bcc23fcf97d0e094e48173757e090dd..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/2/.config/config.yaml +++ /dev/null @@ -1,73 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float16 - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 15 - warmup_runs: 10 - memory: false - energy: false - input_shapes: - batch_size: 16 - sequence_length: 200 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 - can_diffuse: ${can_diffuse:${task}} - can_generate: ${can_generate:${task}} - forward_kwargs: {} - generate_kwargs: {} -experiment_name: llama_1gpu_inference -model: fxmarty/tiny-llama-fast-tokenizer -device: cuda -task: ${infer_task:${model}} -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - 
accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/2/.config/hydra.yaml b/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/2/.config/hydra.yaml deleted file mode 100644 index 955dbe012692602a539e5b61f0b44f69d8da0c58..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/2/.config/hydra.yaml +++ /dev/null @@ -1,174 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,16 - backend.torch_dtype: float16,float32 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
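The inference_results.csv rows deleted above follow directly from the logged latencies: forward throughput is batch_size / forward latency (samples/s) and generate throughput is batch_size x new_tokens / generate latency (tokens/s), with new_tokens fixed at 200 by these configs and results rounded to three significant figures. This relationship is inferred from the numbers themselves, not documented in these files; a minimal check against the two batch-1 rows (job 0, float16; job 1, float32):

    # (forward.latency, forward.throughput, generate.latency, generate.throughput)
    rows = [(0.00319, 313.0, 0.518, 386.0),   # llama_1gpu_inference/0
            (0.00261, 383.0, 0.545, 367.0)]   # llama_1gpu_inference/1
    batch_size, new_tokens = 1, 200

    for fwd_lat, fwd_tp, gen_lat, gen_tp in rows:
        assert abs(batch_size / fwd_lat - fwd_tp) / fwd_tp < 0.01               # samples/s
        assert abs(batch_size * new_tokens / gen_lat - gen_tp) / gen_tp < 0.01  # tokens/s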
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=16 - - backend.torch_dtype=float16 - job: - name: experiment - chdir: true - override_dirname: backend.torch_dtype=float16,benchmark.input_shapes.batch_size=16 - id: '2' - num: 2 - config_name: llama2_1gpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/2 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/2/.config/overrides.yaml b/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/2/.config/overrides.yaml deleted file mode 100644 index 67f6580c5dd56716f1c23e4e90698198799b499f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/2/.config/overrides.yaml +++ /dev/null @@ -1,2 +0,0 @@ -- benchmark.input_shapes.batch_size=16 -- backend.torch_dtype=float16 diff --git a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/2/experiment.log b/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/2/experiment.log deleted file mode 100644 index 79a79305ee2a86c957dee17caf525eed797f2af7..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/2/experiment.log +++ /dev/null @@ -1,26 +0,0 @@ -[2023-09-20 16:11:49,124][inference][INFO] - `new_tokens` 
was set to 200. `max_new_tokens` and `min_new_tokens` will be set to 200. -[2023-09-20 16:11:50,168][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type llama -[2023-09-20 16:11:50,168][backend][INFO] - Configuring pytorch backend -[2023-09-20 16:11:50,168][backend][INFO] - + Checking initial device(s) isolation of CUDA device(s): [0] -[2023-09-20 16:11:50,289][backend][INFO] - + Checking continuous device(s) isolation of CUDA device(s): [0] -[2023-09-20 16:11:50,307][pytorch][INFO] - + Disabling gradients -[2023-09-20 16:11:50,309][pytorch][INFO] - + Loading model on device: cuda -[2023-09-20 16:11:50,709][pytorch][INFO] - + Turning on model's eval mode -[2023-09-20 16:11:50,710][benchmark][INFO] - Configuring inference benchmark -[2023-09-20 16:11:50,710][inference][INFO] - Running inference benchmark -[2023-09-20 16:11:50,710][input_generator][INFO] - Using llama model type generator -[2023-09-20 16:11:50,711][inference][INFO] - + Preparing input for the forward pass -[2023-09-20 16:11:50,711][inference][INFO] - + Warming up the forward pass -[2023-09-20 16:11:50,734][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-20 16:11:50,734][latency_tracker][INFO] - Tracked Pytorch devices: [0] -[2023-09-20 16:12:05,938][inference][INFO] - + Forward pass latency: 4.51e-03 (s) -[2023-09-20 16:12:05,940][inference][INFO] - + Forward pass throughput: 3550.00 (samples/s) -[2023-09-20 16:12:05,941][inference][INFO] - + Preparing input for the generation pass -[2023-09-20 16:12:06,038][inference][INFO] - + Warming up the generation pass -[2023-09-20 16:12:07,633][inference][INFO] - + Tracking generation latency and throughput -[2023-09-20 16:12:07,633][latency_tracker][INFO] - Tracked Pytorch devices: [0] -[2023-09-20 16:12:23,092][inference][INFO] - + Generation pass latency: 5.94e-01 (s) -[2023-09-20 16:12:23,093][inference][INFO] - + Generation pass throughput: 5390.00 (tokens/s) -[2023-09-20 16:12:23,093][inference][INFO] - Saving inference results -[2023-09-20 16:12:23,099][backend][INFO] - Cleaning pytorch backend -[2023-09-20 16:12:23,099][backend][INFO] - + Deleting pretrained model diff --git a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/2/hydra_config.yaml b/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/2/hydra_config.yaml deleted file mode 100644 index 46e7c942b1f98367f69538013206b4b487b0db82..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/2/hydra_config.yaml +++ /dev/null @@ -1,79 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float16 - disable_grad: true - eval_mode: true - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 15 - warmup_runs: 10 - memory: false - energy: false - input_shapes: - batch_size: 16 - sequence_length: 
200 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 - can_diffuse: false - can_generate: true - forward_kwargs: {} - generate_kwargs: - max_new_tokens: 200 - min_new_tokens: 200 - do_sample: false - use_cache: true - pad_token_id: 0 - num_beams: 1 -experiment_name: llama_1gpu_inference -model: fxmarty/tiny-llama-fast-tokenizer -device: cuda -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/2/inference_results.csv b/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/2/inference_results.csv deleted file mode 100644 index 45b1593f2d39c463d4df2130c624a97f34ea233e..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/2/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0.00451,3550.0,0.594,5390.0 diff --git a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/3/.config/config.yaml b/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/3/.config/config.yaml deleted file mode 100644 index 75a2cec740a09f8fed297594d9998531f5ae4881..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/3/.config/config.yaml +++ /dev/null @@ -1,73 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float32 - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 15 - warmup_runs: 10 - memory: false - energy: false - input_shapes: - batch_size: 16 - sequence_length: 200 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 - can_diffuse: ${can_diffuse:${task}} - can_generate: ${can_generate:${task}} - forward_kwargs: {} - generate_kwargs: {} -experiment_name: llama_1gpu_inference -model: fxmarty/tiny-llama-fast-tokenizer -device: cuda -task: ${infer_task:${model}} -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - 
accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/3/.config/hydra.yaml b/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/3/.config/hydra.yaml deleted file mode 100644 index 73ff9d116617035c133bc48d20c51c41742cbbef..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/3/.config/hydra.yaml +++ /dev/null @@ -1,174 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,16 - backend.torch_dtype: float16,float32 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
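The same relationship holds at batch_size=16: job 2's row (0.00451 s forward, 0.594 s generate) gives 16 / 0.00451 ~ 3550 samples/s and 16 x 200 / 0.594 ~ 5390 tokens/s, exactly the logged values. The gain is sub-linear in batch size, since forward latency grows from 3.19 ms to 4.51 ms as the batch goes from 1 to 16; a short calculation over the two float16 rows makes the scaling explicit:

    # float16 rows: (batch_size, forward.latency_s, generate.latency_s)
    b1, b16 = (1, 0.00319, 0.518), (16, 0.00451, 0.594)

    fwd_scaling = (b16[0] / b16[1]) / (b1[0] / b1[1])
    gen_scaling = (b16[0] * 200 / b16[2]) / (b1[0] * 200 / b1[2])
    print(f"forward throughput scales {fwd_scaling:.1f}x for a 16x batch")  # ~11.3x
    print(f"generate throughput scales {gen_scaling:.1f}x")                 # ~14.0x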
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=16 - - backend.torch_dtype=float32 - job: - name: experiment - chdir: true - override_dirname: backend.torch_dtype=float32,benchmark.input_shapes.batch_size=16 - id: '3' - num: 3 - config_name: llama2_1gpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/3 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/3/.config/overrides.yaml b/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/3/.config/overrides.yaml deleted file mode 100644 index 9d69bb0ed87b5cb93e4c4e5d5b820de170746b79..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/3/.config/overrides.yaml +++ /dev/null @@ -1,2 +0,0 @@ -- benchmark.input_shapes.batch_size=16 -- backend.torch_dtype=float32 diff --git a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/3/experiment.log b/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/3/experiment.log deleted file mode 100644 index ef0e47afcf1528e74f7749bdb747590bf8b42a09..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/3/experiment.log +++ /dev/null @@ -1,26 +0,0 @@ -[2023-09-20 16:12:23,733][inference][INFO] - `new_tokens` 
-[2023-09-20 16:12:23,733][inference][INFO] - `new_tokens` was set to 200. `max_new_tokens` and `min_new_tokens` will be set to 200.
-[2023-09-20 16:12:24,696][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
-[2023-09-20 16:12:24,697][backend][INFO] - Configuring pytorch backend
-[2023-09-20 16:12:24,697][backend][INFO] - + Checking initial device(s) isolation of CUDA device(s): [0]
-[2023-09-20 16:12:24,818][backend][INFO] - + Checking continuous device(s) isolation of CUDA device(s): [0]
-[2023-09-20 16:12:24,838][pytorch][INFO] - + Disabling gradients
-[2023-09-20 16:12:24,839][pytorch][INFO] - + Loading model on device: cuda
-[2023-09-20 16:12:25,216][pytorch][INFO] - + Turning on model's eval mode
-[2023-09-20 16:12:25,217][benchmark][INFO] - Configuring inference benchmark
-[2023-09-20 16:12:25,217][inference][INFO] - Running inference benchmark
-[2023-09-20 16:12:25,217][input_generator][INFO] - Using llama model type generator
-[2023-09-20 16:12:25,218][inference][INFO] - + Preparing input for the forward pass
-[2023-09-20 16:12:25,298][inference][INFO] - + Warming up the forward pass
-[2023-09-20 16:12:25,320][inference][INFO] - + Tracking forward pass latency and throughput
-[2023-09-20 16:12:25,320][latency_tracker][INFO] - Tracked Pytorch devices: [0]
-[2023-09-20 16:12:40,752][inference][INFO] - + Forward pass latency: 3.95e-03 (s)
-[2023-09-20 16:12:40,754][inference][INFO] - + Forward pass throughput: 4050.00 (samples/s)
-[2023-09-20 16:12:40,754][inference][INFO] - + Preparing input for the generation pass
-[2023-09-20 16:12:40,754][inference][INFO] - + Warming up the generation pass
-[2023-09-20 16:12:41,369][inference][INFO] - + Tracking generation latency and throughput
-[2023-09-20 16:12:41,370][latency_tracker][INFO] - Tracked Pytorch devices: [0]
-[2023-09-20 16:12:56,500][inference][INFO] - + Generation pass latency: 6.30e-01 (s)
-[2023-09-20 16:12:56,500][inference][INFO] - + Generation pass throughput: 5080.00 (tokens/s)
-[2023-09-20 16:12:56,501][inference][INFO] - Saving inference results
-[2023-09-20 16:12:56,507][backend][INFO] - Cleaning pytorch backend
-[2023-09-20 16:12:56,507][backend][INFO] - + Deleting pretrained model
diff --git a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/3/hydra_config.yaml b/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/3/hydra_config.yaml
deleted file mode 100644
index 2404fd246618d0489ff3ba123302854d0830756a..0000000000000000000000000000000000000000
--- a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/3/hydra_config.yaml
+++ /dev/null
@@ -1,79 +0,0 @@
-backend:
-  name: pytorch
-  version: 2.1.0+rocm5.6
-  _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
-  seed: 42
-  inter_op_num_threads: null
-  intra_op_num_threads: null
-  initial_isolation_check: true
-  continous_isolation_check: true
-  delete_cache: false
-  no_weights: false
-  device_map: null
-  torch_dtype: float32
-  disable_grad: true
-  eval_mode: true
-  amp_autocast: false
-  amp_dtype: null
-  torch_compile: false
-  torch_compile_config: {}
-  bettertransformer: false
-  quantization_scheme: null
-  quantization_config: {}
-  use_ddp: false
-  ddp_config: {}
-  peft_strategy: null
-  peft_config: {}
-benchmark:
-  name: inference
-  _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark
-  duration: 15
-  warmup_runs: 10
-  memory: false
-  energy: false
-  input_shapes:
-    batch_size: 16
-    sequence_length: 200
-    num_choices: 1
-    feature_size: 80
-    nb_max_frames: 3000
-    audio_sequence_length: 16000
-  new_tokens: 200
-  can_diffuse: false
-  can_generate: true
-  forward_kwargs: {}
-  generate_kwargs:
-    max_new_tokens: 200
-    min_new_tokens: 200
-    do_sample: false
-    use_cache: true
-    pad_token_id: 0
-    num_beams: 1
-experiment_name: llama_1gpu_inference
-model: fxmarty/tiny-llama-fast-tokenizer
-device: cuda
-task: text-generation
-hub_kwargs:
-  revision: main
-  cache_dir: null
-  force_download: false
-  local_files_only: false
-environment:
-  optimum_version: 1.13.1
-  transformers_version: 4.34.0.dev0
-  accelerate_version: 0.23.0
-  diffusers_version: null
-  python_version: 3.10.12
-  system: Linux
-  cpu: ' AMD EPYC 7643 48-Core Processor'
-  cpu_count: 96
-  cpu_ram_mb: 1082028
-  gpus:
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
diff --git a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/3/inference_results.csv b/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/3/inference_results.csv
deleted file mode 100644
index 7f58aed78859573a8cd154d38a469cc6f92775e8..0000000000000000000000000000000000000000
--- a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/llama_1gpu_inference/3/inference_results.csv
+++ /dev/null
@@ -1,2 +0,0 @@
-forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
-0.00395,4050.0,0.63,5080.0
diff --git a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/pytorch_bert_inference/0/.config/config.yaml
deleted file mode 100644
index b0c83089a2621f9950400d418a85b4376221df44..0000000000000000000000000000000000000000
--- a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/pytorch_bert_inference/0/.config/config.yaml
+++ /dev/null
@@ -1,73 +0,0 @@
-backend:
-  name: pytorch
-  version: ${pytorch_version:}
-  _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
-  seed: 42
-  inter_op_num_threads: null
-  intra_op_num_threads: null
-  initial_isolation_check: true
-  continous_isolation_check: true
-  delete_cache: false
-  no_weights: false
-  device_map: null
-  torch_dtype: null
-  disable_grad: ${is_inference:${benchmark.name}}
-  eval_mode: ${is_inference:${benchmark.name}}
-  amp_autocast: false
-  amp_dtype: null
-  torch_compile: false
-  torch_compile_config: {}
-  bettertransformer: false
-  quantization_scheme: null
-  quantization_config: {}
-  use_ddp: false
-  ddp_config: {}
-  peft_strategy: null
-  peft_config: {}
-benchmark:
-  name: inference
-  _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark
-  duration: 5
-  warmup_runs: 10
-  memory: true
-  energy: false
-  input_shapes:
-    batch_size: 1
-    sequence_length: 16
-    num_choices: 1
-    feature_size: 80
-    nb_max_frames: 3000
-    audio_sequence_length: 16000
-  new_tokens: null
-  can_diffuse: ${can_diffuse:${task}}
-  can_generate: ${can_generate:${task}}
-  forward_kwargs: {}
-  generate_kwargs: {}
-experiment_name: pytorch_bert_inference
-model: hf-internal-testing/tiny-random-bert
-device: cpu
-task: text-classification
-hub_kwargs:
-  revision: main
-  cache_dir: null
-  force_download: false
-  local_files_only: false
-environment:
-  optimum_version: 1.13.1
-  transformers_version: 4.34.0.dev0
-  accelerate_version: 0.23.0
-  diffusers_version: null
-  python_version: 3.10.12
-  system: Linux
-  cpu: ' AMD EPYC 7643 48-Core Processor'
-  cpu_count: 96
-  cpu_ram_mb: 1082028
-  gpus:
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
diff --git a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/pytorch_bert_inference/0/.config/hydra.yaml
deleted file mode 100644
index 7e94e040a737e255839c566d5226214e3b1758af..0000000000000000000000000000000000000000
--- a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/pytorch_bert_inference/0/.config/hydra.yaml
+++ /dev/null
@@ -1,172 +0,0 @@
-hydra:
-  run:
-    dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}
-  sweep:
-    dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}
-    subdir: ${hydra.job.num}
-  launcher:
-    _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
-  sweeper:
-    _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
-    max_batch_size: null
-    params:
-      benchmark.input_shapes.batch_size: 1,4
-  help:
-    app_name: ${hydra.job.name}
-    header: '${hydra.help.app_name} is powered by Hydra.
-
-      '
-    footer: 'Powered by Hydra (https://hydra.cc)
-
-      Use --hydra-help to view Hydra specific help
-
-      '
-    template: '${hydra.help.header}
-
-      == Configuration groups ==
-
-      Compose your configuration from those groups (group=option)
-
-
-      $APP_CONFIG_GROUPS
-
-
-      == Config ==
-
-      Override anything in the config (foo.bar=value)
-
-
-      $CONFIG
-
-
-      ${hydra.help.footer}
-
-      '
-  hydra_help:
-    template: 'Hydra (${hydra.runtime.version})
-
-      See https://hydra.cc for more info.
-
-
-      == Flags ==
-
-      $FLAGS_HELP
-
-
-      == Configuration groups ==
-
-      Compose your configuration from those groups (For example, append hydra/job_logging=disabled
-      to command line)
-
-
-      $HYDRA_CONFIG_GROUPS
-
-
-      Use ''--cfg hydra'' to Show the Hydra config.
-
-      '
-    hydra_help: ???
-  hydra_logging:
-    version: 1
-    formatters:
-      colorlog:
-        (): colorlog.ColoredFormatter
-        format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
-    handlers:
-      console:
-        class: logging.StreamHandler
-        formatter: colorlog
-        stream: ext://sys.stdout
-    root:
-      level: INFO
-      handlers:
-      - console
-    disable_existing_loggers: false
-  job_logging:
-    version: 1
-    formatters:
-      simple:
-        format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
-      colorlog:
-        (): colorlog.ColoredFormatter
-        format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
-          - %(message)s'
-        log_colors:
-          DEBUG: purple
-          INFO: green
-          WARNING: yellow
-          ERROR: red
-          CRITICAL: red
-    handlers:
-      console:
-        class: logging.StreamHandler
-        formatter: colorlog
-        stream: ext://sys.stdout
-      file:
-        class: logging.FileHandler
-        formatter: simple
-        filename: ${hydra.job.name}.log
-    root:
-      level: INFO
-      handlers:
-      - console
-      - file
-    disable_existing_loggers: false
-  env: {}
-  mode: MULTIRUN
-  searchpath: []
-  callbacks: {}
-  output_subdir: .hydra
-  overrides:
-    hydra:
-    - hydra.mode=MULTIRUN
-    task:
-    - benchmark.input_shapes.batch_size=1
-  job:
-    name: experiment
-    chdir: true
-    override_dirname: benchmark.input_shapes.batch_size=1
-    id: '0'
-    num: 0
-    config_name: bert_cpu_inference
-    env_set: {}
-    env_copy: []
-    config:
-      override_dirname:
-        kv_sep: '='
-        item_sep: ','
-        exclude_keys: []
-  runtime:
-    version: 1.3.2
-    version_base: '1.3'
-    cwd: /home/user/transformers-regression
-    config_sources:
-    - path: hydra.conf
-      schema: pkg
-      provider: hydra
-    - path: optimum_benchmark
-      schema: pkg
-      provider: main
-    - path: hydra_plugins.hydra_colorlog.conf
-      schema: pkg
-      provider: hydra-colorlog
-    - path: /home/user/transformers-regression/configs
-      schema: file
-      provider: command-line
-    - path: ''
-      schema: structured
-      provider: schema
-    output_dir: /home/user/transformers-regression/sweeps/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/pytorch_bert_inference/0
-    choices:
-      benchmark: inference
-      backend: pytorch
-      hydra/env: default
-      hydra/callbacks: null
-      hydra/job_logging: colorlog
-      hydra/hydra_logging: colorlog
-      hydra/hydra_help: default
-      hydra/help: default
-      hydra/sweeper: basic
-      hydra/launcher: basic
-      hydra/output: default
-  verbose: false
diff --git a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/pytorch_bert_inference/0/.config/overrides.yaml
deleted file mode 100644
index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000
--- a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/pytorch_bert_inference/0/.config/overrides.yaml
+++ /dev/null
@@ -1 +0,0 @@
-- benchmark.input_shapes.batch_size=1
diff --git a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/pytorch_bert_inference/0/experiment.log b/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/pytorch_bert_inference/0/experiment.log
deleted file mode 100644
index accedd25517d8554252f163ff5639501af0c3dba..0000000000000000000000000000000000000000
--- a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/pytorch_bert_inference/0/experiment.log
+++ /dev/null
@@ -1,18 +0,0 @@
-[2023-09-20 16:09:58,972][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert
-[2023-09-20 16:09:58,973][backend][INFO] - Configuring pytorch backend
-[2023-09-20 16:09:58,975][pytorch][INFO] - + Disabling gradients
-[2023-09-20 16:09:58,975][pytorch][INFO] - + Loading model on device: cpu
-[2023-09-20 16:10:00,211][pytorch][INFO] - + Turning on model's eval mode
-[2023-09-20 16:10:00,219][benchmark][INFO] - Configuring inference benchmark
-[2023-09-20 16:10:00,219][inference][INFO] - Running inference benchmark
-[2023-09-20 16:10:00,219][input_generator][INFO] - Using bert model type generator
-[2023-09-20 16:10:00,219][inference][INFO] - + Preparing input for the forward pass
-[2023-09-20 16:10:00,220][inference][INFO] - + Warming up the forward pass
-[2023-09-20 16:10:00,420][inference][INFO] - + Tracking forward pass latency and throughput
-[2023-09-20 16:10:05,462][inference][INFO] - + Forward pass latency: 3.17e-03 (s)
-[2023-09-20 16:10:05,463][inference][INFO] - + Forward pass throughput: 315.00 (samples/s)
-[2023-09-20 16:10:05,463][inference][INFO] - + Tracking forward pass peak memory
-[2023-09-20 16:10:05,544][inference][INFO] - + Forward pass peak memory: 551 (MB)
-[2023-09-20 16:10:05,545][inference][INFO] - Saving inference results
-[2023-09-20 16:10:05,551][backend][INFO] - Cleaning pytorch backend
-[2023-09-20 16:10:05,551][backend][INFO] - + Deleting pretrained model
diff --git a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/pytorch_bert_inference/0/hydra_config.yaml
deleted file mode 100644
index 8159f39871a74c9f18130d7533a9047d427de8e1..0000000000000000000000000000000000000000
--- a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/pytorch_bert_inference/0/hydra_config.yaml
+++ /dev/null
@@ -1,73 +0,0 @@
-backend:
-  name: pytorch
-  version: 2.1.0+rocm5.6
-  _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
-  seed: 42
-  inter_op_num_threads: null
-  intra_op_num_threads: null
-  initial_isolation_check: true
-  continous_isolation_check: true
-  delete_cache: false
-  no_weights: false
-  device_map: null
-  torch_dtype: null
-  disable_grad: true
-  eval_mode: true
-  amp_autocast: false
-  amp_dtype: null
-  torch_compile: false
-  torch_compile_config: {}
-  bettertransformer: false
-  quantization_scheme: null
-  quantization_config: {}
-  use_ddp: false
-  ddp_config: {}
-  peft_strategy: null
-  peft_config: {}
-benchmark:
-  name: inference
-  _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark
-  duration: 5
-  warmup_runs: 10
-  memory: true
-  energy: false
-  input_shapes:
-    batch_size: 1
-    sequence_length: 16
-    num_choices: 1
-    feature_size: 80
-    nb_max_frames: 3000
-    audio_sequence_length: 16000
-  new_tokens: null
-  can_diffuse: false
-  can_generate: false
-  forward_kwargs: {}
-  generate_kwargs: {}
-experiment_name: pytorch_bert_inference
-model: hf-internal-testing/tiny-random-bert
-device: cpu
-task: text-classification
-hub_kwargs:
-  revision: main
-  cache_dir: null
-  force_download: false
-  local_files_only: false
-environment:
-  optimum_version: 1.13.1
-  transformers_version: 4.34.0.dev0
-  accelerate_version: 0.23.0
-  diffusers_version: null
-  python_version: 3.10.12
-  system: Linux
-  cpu: ' AMD EPYC 7643 48-Core Processor'
-  cpu_count: 96
-  cpu_ram_mb: 1082028
-  gpus:
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
diff --git a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/pytorch_bert_inference/0/inference_results.csv
deleted file mode 100644
index 3f59d14e312685ae5d2428205e192accf4d2adfb..0000000000000000000000000000000000000000
--- a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/pytorch_bert_inference/0/inference_results.csv
+++ /dev/null
@@ -1,2 +0,0 @@
-forward.latency(s),forward.throughput(samples/s),forward.peak_memory(MB)
-0.00317,315.0,551
diff --git a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/pytorch_bert_inference/1/.config/config.yaml
deleted file mode 100644
index cf6b2100f521049a9838a290716b3948d38db210..0000000000000000000000000000000000000000
--- a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/pytorch_bert_inference/1/.config/config.yaml
+++ /dev/null
@@ -1,73 +0,0 @@
-backend:
-  name: pytorch
-  version: ${pytorch_version:}
-  _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
-  seed: 42
-  inter_op_num_threads: null
-  intra_op_num_threads: null
-  initial_isolation_check: true
-  continous_isolation_check: true
-  delete_cache: false
-  no_weights: false
-  device_map: null
-  torch_dtype: null
-  disable_grad: ${is_inference:${benchmark.name}}
-  eval_mode: ${is_inference:${benchmark.name}}
-  amp_autocast: false
-  amp_dtype: null
-  torch_compile: false
-  torch_compile_config: {}
-  bettertransformer: false
-  quantization_scheme: null
-  quantization_config: {}
-  use_ddp: false
-  ddp_config: {}
-  peft_strategy: null
-  peft_config: {}
-benchmark:
-  name: inference
-  _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark
-  duration: 5
-  warmup_runs: 10
-  memory: true
-  energy: false
-  input_shapes:
-    batch_size: 4
-    sequence_length: 16
-    num_choices: 1
-    feature_size: 80
-    nb_max_frames: 3000
-    audio_sequence_length: 16000
-  new_tokens: null
-  can_diffuse: ${can_diffuse:${task}}
-  can_generate: ${can_generate:${task}}
-  forward_kwargs: {}
-  generate_kwargs: {}
-experiment_name: pytorch_bert_inference
-model: hf-internal-testing/tiny-random-bert
-device: cpu
-task: text-classification
-hub_kwargs:
-  revision: main
-  cache_dir: null
-  force_download: false
-  local_files_only: false
-environment:
-  optimum_version: 1.13.1
-  transformers_version: 4.34.0.dev0
-  accelerate_version: 0.23.0
-  diffusers_version: null
-  python_version: 3.10.12
-  system: Linux
-  cpu: ' AMD EPYC 7643 48-Core Processor'
-  cpu_count: 96
-  cpu_ram_mb: 1082028
-  gpus:
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
diff --git a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/pytorch_bert_inference/1/.config/hydra.yaml
deleted file mode 100644
index 1c6434243baedb6e15e68ce6ff6c10e7d12c8fd8..0000000000000000000000000000000000000000
--- a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/pytorch_bert_inference/1/.config/hydra.yaml
+++ /dev/null
@@ -1,172 +0,0 @@
-hydra:
-  run:
-    dir:
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: experiment - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/pytorch_bert_inference/1/experiment.log b/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/pytorch_bert_inference/1/experiment.log deleted file mode 100644 index feafd7de3754dbcb2d18bd5801eb46984f86392b..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/pytorch_bert_inference/1/experiment.log +++ /dev/null @@ -1,18 +0,0 @@ -[2023-09-20 16:10:06,721][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-20 16:10:06,721][backend][INFO] - Configuring pytorch backend -[2023-09-20 16:10:06,722][pytorch][INFO] - + Disabling gradients -[2023-09-20 16:10:06,722][pytorch][INFO] - + Loading model on device: cpu -[2023-09-20 16:10:06,923][pytorch][INFO] - + Turning on model's eval mode -[2023-09-20 16:10:06,924][benchmark][INFO] - Configuring inference benchmark -[2023-09-20 16:10:06,924][inference][INFO] - Running inference benchmark -[2023-09-20 16:10:06,924][input_generator][INFO] - Using bert model type generator -[2023-09-20 16:10:06,924][inference][INFO] - + Preparing input for the forward pass -[2023-09-20 16:10:06,924][inference][INFO] - + Warming up the forward pass -[2023-09-20 16:10:06,962][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-20 16:10:11,995][inference][INFO] - + Forward pass latency: 3.72e-03 (s) -[2023-09-20 16:10:11,999][inference][INFO] - + Forward pass throughput: 1080.00 (samples/s) -[2023-09-20 16:10:11,999][inference][INFO] - + Tracking forward pass peak memory -[2023-09-20 16:10:12,074][inference][INFO] - + Forward pass peak memory: 554 (MB) -[2023-09-20 16:10:12,075][inference][INFO] - Saving inference results -[2023-09-20 16:10:12,079][backend][INFO] - Cleaning pytorch backend -[2023-09-20 16:10:12,080][backend][INFO] - + Deleting pretrained model diff --git a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 00d5f274372d051cb01e74f2a4b26b9ac2511e68..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,73 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: null - disable_grad: true - eval_mode: true - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - 
quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 5 - warmup_runs: 10 - memory: true - energy: false - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: null - can_diffuse: false - can_generate: false - forward_kwargs: {} - generate_kwargs: {} -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index ddc9730a8ab4586c9953ef40a253c7a502d1a476..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -forward.latency(s),forward.throughput(samples/s),forward.peak_memory(MB) -0.00372,1080.0,554 diff --git a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index aa8ba5cebbb00543dea83d42e1a3242be877425e..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,73 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 5 - warmup_runs: 10 - memory: true - energy: false - input_shapes: - batch_size: 2 - sequence_length: 16 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: null - can_diffuse: ${can_diffuse:${task}} - can_generate: ${can_generate:${task}} - forward_kwargs: {} - generate_kwargs: {} -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 
-device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index a3adfa1ab5470e21e5874e38d412da62b2d869d7..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: experiment - chdir: true - override_dirname: '' - id: '0' - num: 0 - config_name: gpt2_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/pytorch_gpt2_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/pytorch_gpt2_inference/0/.config/overrides.yaml deleted file mode 100644 index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/pytorch_gpt2_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/pytorch_gpt2_inference/0/experiment.log b/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/pytorch_gpt2_inference/0/experiment.log deleted file mode 100644 index eb034c3d3a2b4bbd61bf0cdf8f18e470852c47f8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/pytorch_gpt2_inference/0/experiment.log +++ /dev/null @@ -1,25 +0,0 @@ -[2023-09-20 16:10:19,770][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-09-20 16:10:19,770][backend][INFO] - Configuring pytorch backend -[2023-09-20 16:10:19,773][pytorch][INFO] 
- + Disabling gradients -[2023-09-20 16:10:19,773][pytorch][INFO] - + Loading model on device: cpu -[2023-09-20 16:10:21,213][pytorch][INFO] - + Turning on model's eval mode -[2023-09-20 16:10:21,217][benchmark][INFO] - Configuring inference benchmark -[2023-09-20 16:10:21,217][inference][INFO] - Running inference benchmark -[2023-09-20 16:10:21,217][input_generator][INFO] - Using gpt2 model type generator -[2023-09-20 16:10:21,217][inference][INFO] - + Preparing input for the forward pass -[2023-09-20 16:10:21,218][inference][INFO] - + Warming up the forward pass -[2023-09-20 16:10:21,286][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-20 16:10:26,329][inference][INFO] - + Forward pass latency: 3.24e-03 (s) -[2023-09-20 16:10:26,330][inference][INFO] - + Forward pass throughput: 617.00 (samples/s) -[2023-09-20 16:10:26,330][inference][INFO] - + Tracking forward pass peak memory -[2023-09-20 16:10:26,401][inference][INFO] - + Forward pass peak memory: 555 (MB) -[2023-09-20 16:10:26,402][inference][INFO] - + Preparing input for the generation pass -[2023-09-20 16:10:26,402][inference][INFO] - + Warming up the generation pass -[2023-09-20 16:10:26,717][inference][INFO] - + Tracking generation latency and throughput -[2023-09-20 16:10:32,009][inference][INFO] - + Generation pass latency: 3.11e-01 (s) -[2023-09-20 16:10:32,009][inference][INFO] - + Generation pass throughput: 643.00 (tokens/s) -[2023-09-20 16:10:32,010][inference][INFO] - + Tracking generation pass peak memory -[2023-09-20 16:10:32,355][inference][INFO] - + Generation pass peak memory: 558 (MB) -[2023-09-20 16:10:32,355][inference][INFO] - Saving inference results -[2023-09-20 16:10:32,362][backend][INFO] - Cleaning pytorch backend -[2023-09-20 16:10:32,362][backend][INFO] - + Deleting pretrained model diff --git a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index f46fa028eedfeac3a6b4c27f9b2b7fe54502d316..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,79 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: null - disable_grad: true - eval_mode: true - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 5 - warmup_runs: 10 - memory: true - energy: false - input_shapes: - batch_size: 2 - sequence_length: 16 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: null - can_diffuse: false - can_generate: true - forward_kwargs: {} - generate_kwargs: - max_new_tokens: 100 - min_new_tokens: 100 - do_sample: false - use_cache: true - pad_token_id: 0 - num_beams: 1 -experiment_name: pytorch_gpt2_inference -model: 
hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index f617deb206c0f7e3cfc6ba2b4f216e1c93003248..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_15:38:59_0b5024ce725a0f6b6d8cfe740e7a2a6021257c37/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -forward.latency(s),forward.throughput(samples/s),forward.peak_memory(MB),generate.latency(s),generate.throughput(tokens/s),generate.peak_memory(MB) -0.00324,617.0,555,0.311,643.0,558 diff --git a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/0/.config/config.yaml b/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/0/.config/config.yaml deleted file mode 100644 index 833d05497b59ba26a18c6a302d0bb91fe2b458ce..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/0/.config/config.yaml +++ /dev/null @@ -1,75 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float16 - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: training - _target_: optimum_benchmark.benchmarks.training.benchmark.TrainingBenchmark - warmup_steps: 40 - dataset_shapes: - dataset_size: 2000 - sequence_length: 273 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - training_arguments: - skip_memory_metrics: true - output_dir: ./trainer_output - use_cpu: ${is_cpu:${device}} - ddp_find_unused_parameters: false - do_train: true - do_eval: false - do_predict: false - report_to: none - per_device_train_batch_size: 16 -experiment_name: bert_1gpu_training -model: bert-base-uncased -device: cuda -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct 
MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/0/.config/hydra.yaml b/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/0/.config/hydra.yaml deleted file mode 100644 index 86da6cdb47ccbf65410c595a0c8b0c43f3c5ac2a..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/0/.config/hydra.yaml +++ /dev/null @@ -1,174 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - +benchmark.training_arguments.per_device_train_batch_size: 16,32 - backend.torch_dtype: float16,float32 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - +benchmark.training_arguments.per_device_train_batch_size=16 - - backend.torch_dtype=float16 - job: - name: experiment - chdir: true - override_dirname: +benchmark.training_arguments.per_device_train_batch_size=16,backend.torch_dtype=float16 - id: '0' - num: 0 - config_name: bert_1gpu_training - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/0 - choices: - benchmark: training - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/0/.config/overrides.yaml b/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/0/.config/overrides.yaml deleted file mode 100644 index 8d6d8f16f84a50b1a79c5e37697d3e62129d7306..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/0/.config/overrides.yaml +++ /dev/null @@ -1,2 +0,0 @@ -- +benchmark.training_arguments.per_device_train_batch_size=16 -- backend.torch_dtype=float16 diff --git a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/0/experiment.log b/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/0/experiment.log deleted file mode 100644 index 66affb345d83ca7fbd233b4a239178e91397c67a..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/0/experiment.log +++ /dev/null @@ -1,17 +0,0 @@ 
-[2023-09-20 18:49:49,521][experiment][WARNING] - Multiple GPUs detected but CUDA_DEVICE_ORDER is not set. This means that code might allocate resources from the wrong GPUs even if CUDA_VISIBLE_DEVICES is set. Pytorch uses the `FASTEST_FIRST` order by default, which is not guaranteed to be the same as nvidia-smi. `CUDA_DEVICE_ORDER` will be set to `PCI_BUS_ID` to ensure that the GPUs are allocated in the same order as nvidia-smi. -[2023-09-20 18:49:53,227][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-20 18:49:53,228][backend][INFO] - Configuring pytorch backend -[2023-09-20 18:49:53,230][backend][INFO] - + Checking initial device(s) isolation of CUDA device(s): [0] -[2023-09-20 18:49:53,359][backend][INFO] - + Checking continuous device(s) isolation of CUDA device(s): [0] -[2023-09-20 18:49:53,371][pytorch][INFO] - + Loading model on device: cuda -[2023-09-20 18:50:02,076][benchmark][INFO] - Configuring training benchmark -[2023-09-20 18:50:02,077][training][INFO] - Running training benchmark -[2023-09-20 18:50:02,077][dataset_generator][INFO] - Using text-classification task generator -[2023-09-20 18:50:02,135][pytorch][INFO] - + Setting dataset format to `torch`. -[2023-09-20 18:50:02,135][pytorch][INFO] - + Wrapping training arguments with transformers.TrainingArguments -[2023-09-20 18:50:02,136][pytorch][INFO] - + Wrapping model with transformers.Trainer -[2023-09-20 18:50:02,141][pytorch][INFO] - + Starting training -[2023-09-20 18:50:37,455][pytorch][INFO] - + Training finished successfully -[2023-09-20 18:50:37,456][training][INFO] - Saving training results -[2023-09-20 18:50:37,460][backend][INFO] - Cleaning pytorch backend -[2023-09-20 18:50:37,460][backend][INFO] - + Deleting pretrained model diff --git a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/0/hydra_config.yaml b/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/0/hydra_config.yaml deleted file mode 100644 index a75f0da9b0aa0151767b6e6858f2f15bc4de54c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/0/hydra_config.yaml +++ /dev/null @@ -1,75 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float16 - disable_grad: false - eval_mode: false - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: training - _target_: optimum_benchmark.benchmarks.training.benchmark.TrainingBenchmark - warmup_steps: 40 - dataset_shapes: - dataset_size: 2000 - sequence_length: 273 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - training_arguments: - skip_memory_metrics: true - output_dir: ./trainer_output - use_cpu: false - ddp_find_unused_parameters: false - do_train: true - do_eval: false - do_predict: false - report_to: none - per_device_train_batch_size: 16 -experiment_name: bert_1gpu_training -model: bert-base-uncased -device: cuda -task: 
text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/0/training_results.csv b/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/0/training_results.csv deleted file mode 100644 index 2afeeebdd6499778e103c5055f74329b2defc9d6..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/0/training_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -warmup.runtime(s),warmup.throughput(samples/s),training.runtime(s),training.throughput(samples/s),overall_training.runtime(s),overall_training.throughput(samples/s) -4.3010945320129395,148.79933357346508,30.903000831604004,173.44593909205133,35.20409655570984,152.25500792266882 diff --git a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/1/.config/config.yaml b/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/1/.config/config.yaml deleted file mode 100644 index 8d5c88f6241de713f22a4fbaee0c5bb7f23f5dfa..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/1/.config/config.yaml +++ /dev/null @@ -1,75 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float32 - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: training - _target_: optimum_benchmark.benchmarks.training.benchmark.TrainingBenchmark - warmup_steps: 40 - dataset_shapes: - dataset_size: 2000 - sequence_length: 273 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - training_arguments: - skip_memory_metrics: true - output_dir: ./trainer_output - use_cpu: ${is_cpu:${device}} - ddp_find_unused_parameters: false - do_train: true - do_eval: false - do_predict: false - report_to: none - per_device_train_batch_size: 16 -experiment_name: bert_1gpu_training -model: bert-base-uncased -device: cuda -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - 
Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/1/.config/hydra.yaml b/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/1/.config/hydra.yaml deleted file mode 100644 index 74efb98dfd1a1fcded0b35e6db9889b7d0e07875..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/1/.config/hydra.yaml +++ /dev/null @@ -1,174 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - +benchmark.training_arguments.per_device_train_batch_size: 16,32 - backend.torch_dtype: float16,float32 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - +benchmark.training_arguments.per_device_train_batch_size=16 - - backend.torch_dtype=float32 - job: - name: experiment - chdir: true - override_dirname: +benchmark.training_arguments.per_device_train_batch_size=16,backend.torch_dtype=float32 - id: '1' - num: 1 - config_name: bert_1gpu_training - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/1 - choices: - benchmark: training - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/1/.config/overrides.yaml b/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/1/.config/overrides.yaml deleted file mode 100644 index ec1fe39ea16e445c5d7092a8b3e071d9b0c55522..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/1/.config/overrides.yaml +++ /dev/null @@ -1,2 +0,0 @@ -- +benchmark.training_arguments.per_device_train_batch_size=16 -- backend.torch_dtype=float32 diff --git a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/1/experiment.log b/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/1/experiment.log deleted file mode 100644 index b45ba045ee46885a241e8b66ef54ea6efe64147d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/1/experiment.log +++ /dev/null @@ -1,16 +0,0 @@ 
-[2023-09-20 18:50:38,957][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-20 18:50:38,957][backend][INFO] - Configuring pytorch backend -[2023-09-20 18:50:38,958][backend][INFO] - + Checking initial device(s) isolation of CUDA device(s): [0] -[2023-09-20 18:50:39,079][backend][INFO] - + Checking continuous device(s) isolation of CUDA device(s): [0] -[2023-09-20 18:50:39,095][pytorch][INFO] - + Loading model on device: cuda -[2023-09-20 18:50:39,808][benchmark][INFO] - Configuring training benchmark -[2023-09-20 18:50:39,808][training][INFO] - Running training benchmark -[2023-09-20 18:50:39,808][dataset_generator][INFO] - Using text-classification task generator -[2023-09-20 18:50:39,848][pytorch][INFO] - + Setting dataset format to `torch`. -[2023-09-20 18:50:39,849][pytorch][INFO] - + Wrapping training arguments with transformers.TrainingArguments -[2023-09-20 18:50:39,850][pytorch][INFO] - + Wrapping model with transformers.Trainer -[2023-09-20 18:50:39,855][pytorch][INFO] - + Starting training -[2023-09-20 18:52:04,181][pytorch][INFO] - + Training finished successfully -[2023-09-20 18:52:04,181][training][INFO] - Saving training results -[2023-09-20 18:52:04,183][backend][INFO] - Cleaning pytorch backend -[2023-09-20 18:52:04,183][backend][INFO] - + Deleting pretrained model diff --git a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/1/hydra_config.yaml b/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/1/hydra_config.yaml deleted file mode 100644 index abcce5eda3bfbb21561ba578be643fe874f4ee2c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/1/hydra_config.yaml +++ /dev/null @@ -1,75 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float32 - disable_grad: false - eval_mode: false - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: training - _target_: optimum_benchmark.benchmarks.training.benchmark.TrainingBenchmark - warmup_steps: 40 - dataset_shapes: - dataset_size: 2000 - sequence_length: 273 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - training_arguments: - skip_memory_metrics: true - output_dir: ./trainer_output - use_cpu: false - ddp_find_unused_parameters: false - do_train: true - do_eval: false - do_predict: false - report_to: none - per_device_train_batch_size: 16 -experiment_name: bert_1gpu_training -model: bert-base-uncased -device: cuda -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct 
MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/1/training_results.csv b/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/1/training_results.csv deleted file mode 100644 index d4501590b6087d752c996dd47320887cbfd55a2e..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/1/training_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -warmup.runtime(s),warmup.throughput(samples/s),training.runtime(s),training.throughput(samples/s),overall_training.runtime(s),overall_training.throughput(samples/s) -9.112696647644043,70.23168056027222,75.10427641868591,71.36744078485569,84.21697664260864,63.64512493420676 diff --git a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/2/.config/config.yaml b/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/2/.config/config.yaml deleted file mode 100644 index e7ef28c956bde8dedcb111288d0157eca31b1c62..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/2/.config/config.yaml +++ /dev/null @@ -1,75 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float16 - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: training - _target_: optimum_benchmark.benchmarks.training.benchmark.TrainingBenchmark - warmup_steps: 40 - dataset_shapes: - dataset_size: 2000 - sequence_length: 273 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - training_arguments: - skip_memory_metrics: true - output_dir: ./trainer_output - use_cpu: ${is_cpu:${device}} - ddp_find_unused_parameters: false - do_train: true - do_eval: false - do_predict: false - report_to: none - per_device_train_batch_size: 32 -experiment_name: bert_1gpu_training -model: bert-base-uncased -device: cuda -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/2/.config/hydra.yaml b/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/2/.config/hydra.yaml deleted file mode 100644 index 
7ead0b995b75edc4c94a6c82a35fff9a6abfb328..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/2/.config/hydra.yaml +++ /dev/null @@ -1,174 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - +benchmark.training_arguments.per_device_train_batch_size: 16,32 - backend.torch_dtype: float16,float32 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - +benchmark.training_arguments.per_device_train_batch_size=32 - - backend.torch_dtype=float16 - job: - name: experiment - chdir: true - override_dirname: +benchmark.training_arguments.per_device_train_batch_size=32,backend.torch_dtype=float16 - id: '2' - num: 2 - config_name: bert_1gpu_training - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: 
command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/2 - choices: - benchmark: training - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/2/.config/overrides.yaml b/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/2/.config/overrides.yaml deleted file mode 100644 index d5e7aac495a5978edf9851ed4029a4f4f4de707b..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/2/.config/overrides.yaml +++ /dev/null @@ -1,2 +0,0 @@ -- +benchmark.training_arguments.per_device_train_batch_size=32 -- backend.torch_dtype=float16 diff --git a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/2/experiment.log b/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/2/experiment.log deleted file mode 100644 index 027bc10226a9c444f38fd8679f95448b09b1cd4a..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/2/experiment.log +++ /dev/null @@ -1,16 +0,0 @@ -[2023-09-20 18:52:05,695][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-20 18:52:05,695][backend][INFO] - Configuring pytorch backend -[2023-09-20 18:52:05,695][backend][INFO] - + Checking initial device(s) isolation of CUDA device(s): [0] -[2023-09-20 18:52:05,818][backend][INFO] - + Checking continuous device(s) isolation of CUDA device(s): [0] -[2023-09-20 18:52:05,836][pytorch][INFO] - + Loading model on device: cuda -[2023-09-20 18:52:06,411][benchmark][INFO] - Configuring training benchmark -[2023-09-20 18:52:06,411][training][INFO] - Running training benchmark -[2023-09-20 18:52:06,411][dataset_generator][INFO] - Using text-classification task generator -[2023-09-20 18:52:06,450][pytorch][INFO] - + Setting dataset format to `torch`. 
-[2023-09-20 18:52:06,451][pytorch][INFO] - + Wrapping training arguments with transformers.TrainingArguments -[2023-09-20 18:52:06,451][pytorch][INFO] - + Wrapping model with transformers.Trainer -[2023-09-20 18:52:06,456][pytorch][INFO] - + Starting training -[2023-09-20 18:52:51,150][pytorch][INFO] - + Training finished successfully -[2023-09-20 18:52:51,151][training][INFO] - Saving training results -[2023-09-20 18:52:51,154][backend][INFO] - Cleaning pytorch backend -[2023-09-20 18:52:51,154][backend][INFO] - + Deleting pretrained model diff --git a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/2/hydra_config.yaml b/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/2/hydra_config.yaml deleted file mode 100644 index af5e494ca3514c36d5e090f1af921bd6e849a818..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/2/hydra_config.yaml +++ /dev/null @@ -1,75 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float16 - disable_grad: false - eval_mode: false - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: training - _target_: optimum_benchmark.benchmarks.training.benchmark.TrainingBenchmark - warmup_steps: 40 - dataset_shapes: - dataset_size: 2000 - sequence_length: 273 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - training_arguments: - skip_memory_metrics: true - output_dir: ./trainer_output - use_cpu: false - ddp_find_unused_parameters: false - do_train: true - do_eval: false - do_predict: false - report_to: none - per_device_train_batch_size: 32 -experiment_name: bert_1gpu_training -model: bert-base-uncased -device: cuda -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/2/training_results.csv b/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/2/training_results.csv deleted file mode 100644 index 82de61b92cd0a598a0506446fccbcff01db59436..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/2/training_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -warmup.runtime(s),warmup.throughput(samples/s),training.runtime(s),training.throughput(samples/s),overall_training.runtime(s),overall_training.throughput(samples/s) 
-9.749308347702026,131.29136492043602,34.83749222755432,136.8640420170314,44.58680176734924,106.9374750151196 diff --git a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/3/.config/config.yaml b/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/3/.config/config.yaml deleted file mode 100644 index bf1190e8785c39d372b5122d20df36f210ecf85f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/3/.config/config.yaml +++ /dev/null @@ -1,75 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float32 - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: training - _target_: optimum_benchmark.benchmarks.training.benchmark.TrainingBenchmark - warmup_steps: 40 - dataset_shapes: - dataset_size: 2000 - sequence_length: 273 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - training_arguments: - skip_memory_metrics: true - output_dir: ./trainer_output - use_cpu: ${is_cpu:${device}} - ddp_find_unused_parameters: false - do_train: true - do_eval: false - do_predict: false - report_to: none - per_device_train_batch_size: 32 -experiment_name: bert_1gpu_training -model: bert-base-uncased -device: cuda -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/3/.config/hydra.yaml b/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/3/.config/hydra.yaml deleted file mode 100644 index 3375a9b98107d775ba5e637df303ceaa7d0ad590..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/3/.config/hydra.yaml +++ /dev/null @@ -1,174 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - +benchmark.training_arguments.per_device_train_batch_size: 16,32 - backend.torch_dtype: float16,float32 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by 
Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - +benchmark.training_arguments.per_device_train_batch_size=32 - - backend.torch_dtype=float32 - job: - name: experiment - chdir: true - override_dirname: +benchmark.training_arguments.per_device_train_batch_size=32,backend.torch_dtype=float32 - id: '3' - num: 3 - config_name: bert_1gpu_training - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/3 - choices: - benchmark: training - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/3/.config/overrides.yaml b/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/3/.config/overrides.yaml deleted file mode 100644 index 
f92a32ab90ab68f9e88427057fcb5ef3cbec936b..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/3/.config/overrides.yaml +++ /dev/null @@ -1,2 +0,0 @@ -- +benchmark.training_arguments.per_device_train_batch_size=32 -- backend.torch_dtype=float32 diff --git a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/3/experiment.log b/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/3/experiment.log deleted file mode 100644 index a94a1925a6727ee0b633e1d94b705d6d97a77e34..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/3/experiment.log +++ /dev/null @@ -1,16 +0,0 @@ -[2023-09-20 18:52:52,753][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-20 18:52:52,753][backend][INFO] - Configuring pytorch backend -[2023-09-20 18:52:52,753][backend][INFO] - + Checking initial device(s) isolation of CUDA device(s): [0] -[2023-09-20 18:52:52,874][backend][INFO] - + Checking continuous device(s) isolation of CUDA device(s): [0] -[2023-09-20 18:52:52,892][pytorch][INFO] - + Loading model on device: cuda -[2023-09-20 18:52:53,412][benchmark][INFO] - Configuring training benchmark -[2023-09-20 18:52:53,412][training][INFO] - Running training benchmark -[2023-09-20 18:52:53,413][dataset_generator][INFO] - Using text-classification task generator -[2023-09-20 18:52:53,452][pytorch][INFO] - + Setting dataset format to `torch`. -[2023-09-20 18:52:53,452][pytorch][INFO] - + Wrapping training arguments with transformers.TrainingArguments -[2023-09-20 18:52:53,453][pytorch][INFO] - + Wrapping model with transformers.Trainer -[2023-09-20 18:52:53,458][pytorch][INFO] - + Starting training -[2023-09-20 18:54:19,214][pytorch][INFO] - + Training finished successfully -[2023-09-20 18:54:19,215][training][INFO] - Saving training results -[2023-09-20 18:54:19,217][backend][INFO] - Cleaning pytorch backend -[2023-09-20 18:54:19,217][backend][INFO] - + Deleting pretrained model diff --git a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/3/hydra_config.yaml b/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/3/hydra_config.yaml deleted file mode 100644 index f53a28377945009d7294ad9edd5f274d64fe05f6..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/3/hydra_config.yaml +++ /dev/null @@ -1,75 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float32 - disable_grad: false - eval_mode: false - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: training - _target_: optimum_benchmark.benchmarks.training.benchmark.TrainingBenchmark - warmup_steps: 40 - dataset_shapes: - dataset_size: 2000 - sequence_length: 273 - num_choices: 1 - 
feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - training_arguments: - skip_memory_metrics: true - output_dir: ./trainer_output - use_cpu: false - ddp_find_unused_parameters: false - do_train: true - do_eval: false - do_predict: false - report_to: none - per_device_train_batch_size: 32 -experiment_name: bert_1gpu_training -model: bert-base-uncased -device: cuda -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/3/training_results.csv b/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/3/training_results.csv deleted file mode 100644 index f505497fd9a7840665c37254e73fb66b3d0103f1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/bert_1gpu_training/3/training_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -warmup.runtime(s),warmup.throughput(samples/s),training.runtime(s),training.throughput(samples/s),overall_training.runtime(s),overall_training.throughput(samples/s) -18.963727474212646,67.49727877816089,66.6864264011383,71.49880803807193,85.65015506744385,55.66831719388616 diff --git a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/0/.config/config.yaml b/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/0/.config/config.yaml deleted file mode 100644 index 31586216610ff3c8e9b7efe6d31ed5af08de26e0..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/0/.config/config.yaml +++ /dev/null @@ -1,73 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float16 - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 15 - warmup_runs: 10 - memory: false - energy: false - input_shapes: - batch_size: 1 - sequence_length: 200 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 - can_diffuse: ${can_diffuse:${task}} - can_generate: ${can_generate:${task}} - forward_kwargs: {} - generate_kwargs: {} -experiment_name: llama_1gpu_inference -model: fxmarty/tiny-llama-fast-tokenizer -device: cuda -task: ${infer_task:${model}} -hub_kwargs: - revision: main - cache_dir: null - 
force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/0/.config/hydra.yaml b/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/0/.config/hydra.yaml deleted file mode 100644 index bf2ea45378db27262ec9fd742fe2dd213c90371f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,174 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,16 - backend.torch_dtype: float16,float32 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - - backend.torch_dtype=float16 - job: - name: experiment - chdir: true - override_dirname: backend.torch_dtype=float16,benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: llama2_1gpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/0/.config/overrides.yaml b/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/0/.config/overrides.yaml deleted file mode 100644 index b7fc5900f179157ac0449016dd8d9497ffd58db4..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/0/.config/overrides.yaml +++ /dev/null @@ -1,2 +0,0 @@ -- benchmark.input_shapes.batch_size=1 -- backend.torch_dtype=float16 diff --git a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/0/experiment.log b/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/0/experiment.log deleted file mode 100644 index 173760f19628b566b28fbd100b076a6792368169..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/0/experiment.log +++ /dev/null @@ -1,27 +0,0 @@ -[2023-09-20 18:55:03,908][inference][INFO] - `new_tokens` was 
set to 200. `max_new_tokens` and `min_new_tokens` will be set to 200. -[2023-09-20 18:55:04,065][experiment][WARNING] - Multiple GPUs detected but CUDA_DEVICE_ORDER is not set. This means that code might allocate resources from the wrong GPUs even if CUDA_VISIBLE_DEVICES is set. Pytorch uses the `FASTEST_FIRST` order by default, which is not guaranteed to be the same as nvidia-smi. `CUDA_DEVICE_ORDER` will be set to `PCI_BUS_ID` to ensure that the GPUs are allocated in the same order as nvidia-smi. -[2023-09-20 18:55:08,485][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type llama -[2023-09-20 18:55:08,485][backend][INFO] - Configuring pytorch backend -[2023-09-20 18:55:08,486][backend][INFO] - + Checking initial device(s) isolation of CUDA device(s): [0] -[2023-09-20 18:55:08,613][backend][INFO] - + Checking continuous device(s) isolation of CUDA device(s): [0] -[2023-09-20 18:55:08,627][pytorch][INFO] - + Disabling gradients -[2023-09-20 18:55:08,628][pytorch][INFO] - + Loading model on device: cuda -[2023-09-20 18:55:10,023][pytorch][INFO] - + Turning on model's eval mode -[2023-09-20 18:55:10,027][benchmark][INFO] - Configuring inference benchmark -[2023-09-20 18:55:10,027][inference][INFO] - Running inference benchmark -[2023-09-20 18:55:10,028][input_generator][INFO] - Using llama model type generator -[2023-09-20 18:55:10,047][inference][INFO] - + Preparing input for the forward pass -[2023-09-20 18:55:10,047][inference][INFO] - + Warming up the forward pass -[2023-09-20 18:55:10,367][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-20 18:55:10,367][latency_tracker][INFO] - Tracked Pytorch devices: [0] -[2023-09-20 18:55:25,887][inference][INFO] - + Forward pass latency: 3.15e-03 (s) -[2023-09-20 18:55:25,890][inference][INFO] - + Forward pass throughput: 317.00 (samples/s) -[2023-09-20 18:55:25,890][inference][INFO] - + Preparing input for the generation pass -[2023-09-20 18:55:25,890][inference][INFO] - + Warming up the generation pass -[2023-09-20 18:55:27,124][inference][INFO] - + Tracking generation latency and throughput -[2023-09-20 18:55:27,124][latency_tracker][INFO] - Tracked Pytorch devices: [0] -[2023-09-20 18:55:42,230][inference][INFO] - + Generation pass latency: 5.03e-01 (s) -[2023-09-20 18:55:42,230][inference][INFO] - + Generation pass throughput: 398.00 (tokens/s) -[2023-09-20 18:55:42,230][inference][INFO] - Saving inference results -[2023-09-20 18:55:42,239][backend][INFO] - Cleaning pytorch backend -[2023-09-20 18:55:42,239][backend][INFO] - + Deleting pretrained model diff --git a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/0/hydra_config.yaml b/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/0/hydra_config.yaml deleted file mode 100644 index 82eb70a083058b7ed98a1ae91483b49ea13d5d6d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/0/hydra_config.yaml +++ /dev/null @@ -1,79 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float16 - disable_grad: true - eval_mode: true - amp_autocast: false - amp_dtype: null - 
torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 15 - warmup_runs: 10 - memory: false - energy: false - input_shapes: - batch_size: 1 - sequence_length: 200 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 - can_diffuse: false - can_generate: true - forward_kwargs: {} - generate_kwargs: - max_new_tokens: 200 - min_new_tokens: 200 - do_sample: false - use_cache: true - pad_token_id: 0 - num_beams: 1 -experiment_name: llama_1gpu_inference -model: fxmarty/tiny-llama-fast-tokenizer -device: cuda -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/0/inference_results.csv b/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/0/inference_results.csv deleted file mode 100644 index ef55ef2a6cb2bb59d939e27a7cea9cba6981dfb7..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0.00315,317.0,0.503,398.0 diff --git a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/1/.config/config.yaml b/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/1/.config/config.yaml deleted file mode 100644 index 43b47967479b8340e846fdf11ba343f34adbfe5d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/1/.config/config.yaml +++ /dev/null @@ -1,73 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float32 - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 15 - warmup_runs: 10 - memory: false - energy: false - input_shapes: - batch_size: 1 - sequence_length: 200 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 - 
can_diffuse: ${can_diffuse:${task}} - can_generate: ${can_generate:${task}} - forward_kwargs: {} - generate_kwargs: {} -experiment_name: llama_1gpu_inference -model: fxmarty/tiny-llama-fast-tokenizer -device: cuda -task: ${infer_task:${model}} -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/1/.config/hydra.yaml b/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/1/.config/hydra.yaml deleted file mode 100644 index ccdf0fed112cb73484d2620c646f85cb8f1c95d5..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,174 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,16 - backend.torch_dtype: float16,float32 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - - backend.torch_dtype=float32 - job: - name: experiment - chdir: true - override_dirname: backend.torch_dtype=float32,benchmark.input_shapes.batch_size=1 - id: '1' - num: 1 - config_name: llama2_1gpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/1/.config/overrides.yaml b/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/1/.config/overrides.yaml deleted file mode 100644 index 8b4741e9eb919dcf02db7f865f28d92490b262b0..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/1/.config/overrides.yaml +++ /dev/null @@ -1,2 +0,0 @@ -- benchmark.input_shapes.batch_size=1 -- backend.torch_dtype=float32 diff --git a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/1/experiment.log b/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/1/experiment.log deleted file mode 100644 index 3e83eea51ebd1ee1840300f36c0774eea213e857..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/1/experiment.log +++ /dev/null @@ -1,26 +0,0 @@ -[2023-09-20 18:55:42,892][inference][INFO] - `new_tokens` was 
set to 200. `max_new_tokens` and `min_new_tokens` will be set to 200. -[2023-09-20 18:55:43,857][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type llama -[2023-09-20 18:55:43,857][backend][INFO] - Configuring pytorch backend -[2023-09-20 18:55:43,857][backend][INFO] - + Checking initial device(s) isolation of CUDA device(s): [0] -[2023-09-20 18:55:43,980][backend][INFO] - + Checking continuous device(s) isolation of CUDA device(s): [0] -[2023-09-20 18:55:43,995][pytorch][INFO] - + Disabling gradients -[2023-09-20 18:55:43,996][pytorch][INFO] - + Loading model on device: cuda -[2023-09-20 18:55:44,489][pytorch][INFO] - + Turning on model's eval mode -[2023-09-20 18:55:44,490][benchmark][INFO] - Configuring inference benchmark -[2023-09-20 18:55:44,490][inference][INFO] - Running inference benchmark -[2023-09-20 18:55:44,490][input_generator][INFO] - Using llama model type generator -[2023-09-20 18:55:44,491][inference][INFO] - + Preparing input for the forward pass -[2023-09-20 18:55:44,491][inference][INFO] - + Warming up the forward pass -[2023-09-20 18:55:44,739][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-20 18:55:44,739][latency_tracker][INFO] - Tracked Pytorch devices: [0] -[2023-09-20 18:56:00,185][inference][INFO] - + Forward pass latency: 2.92e-03 (s) -[2023-09-20 18:56:00,188][inference][INFO] - + Forward pass throughput: 342.00 (samples/s) -[2023-09-20 18:56:00,189][inference][INFO] - + Preparing input for the generation pass -[2023-09-20 18:56:00,286][inference][INFO] - + Warming up the generation pass -[2023-09-20 18:56:00,785][inference][INFO] - + Tracking generation latency and throughput -[2023-09-20 18:56:00,785][latency_tracker][INFO] - Tracked Pytorch devices: [0] -[2023-09-20 18:56:16,330][inference][INFO] - + Generation pass latency: 6.22e-01 (s) -[2023-09-20 18:56:16,331][inference][INFO] - + Generation pass throughput: 322.00 (tokens/s) -[2023-09-20 18:56:16,331][inference][INFO] - Saving inference results -[2023-09-20 18:56:16,338][backend][INFO] - Cleaning pytorch backend -[2023-09-20 18:56:16,338][backend][INFO] - + Deleting pretrained model diff --git a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/1/hydra_config.yaml b/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/1/hydra_config.yaml deleted file mode 100644 index 74699b4dde858de23b60cd15505dc3745c7fa948..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/1/hydra_config.yaml +++ /dev/null @@ -1,79 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float32 - disable_grad: true - eval_mode: true - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 15 - warmup_runs: 10 - memory: false - energy: false - input_shapes: - batch_size: 1 - sequence_length: 200 - 
num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 - can_diffuse: false - can_generate: true - forward_kwargs: {} - generate_kwargs: - max_new_tokens: 200 - min_new_tokens: 200 - do_sample: false - use_cache: true - pad_token_id: 0 - num_beams: 1 -experiment_name: llama_1gpu_inference -model: fxmarty/tiny-llama-fast-tokenizer -device: cuda -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/1/inference_results.csv b/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/1/inference_results.csv deleted file mode 100644 index beb60e89bc69f64204df603b55aeefebcaad362b..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0.00292,342.0,0.622,322.0 diff --git a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/2/.config/config.yaml b/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/2/.config/config.yaml deleted file mode 100644 index 4977442d8bcc23fcf97d0e094e48173757e090dd..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/2/.config/config.yaml +++ /dev/null @@ -1,73 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float16 - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 15 - warmup_runs: 10 - memory: false - energy: false - input_shapes: - batch_size: 16 - sequence_length: 200 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 - can_diffuse: ${can_diffuse:${task}} - can_generate: ${can_generate:${task}} - forward_kwargs: {} - generate_kwargs: {} -experiment_name: llama_1gpu_inference -model: fxmarty/tiny-llama-fast-tokenizer -device: cuda -task: ${infer_task:${model}} -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - 
accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/2/.config/hydra.yaml b/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/2/.config/hydra.yaml deleted file mode 100644 index 6ffb095ff46b5225f81db426dad352864721f083..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/2/.config/hydra.yaml +++ /dev/null @@ -1,174 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,16 - backend.torch_dtype: float16,float32 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
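Note: the sweeper block above declares benchmark.input_shapes.batch_size: 1,16 and backend.torch_dtype: float16,float32; Hydra's BasicSweeper expands such comma-separated lists into their cartesian product, which is why this llama_1gpu_inference experiment carries job dirs 0-3. A minimal standalone sketch of that expansion (an illustration, not part of optimum_benchmark):

from itertools import product

# Param lists copied from the sweeper block above.
params = {
    "benchmark.input_shapes.batch_size": ["1", "16"],
    "backend.torch_dtype": ["float16", "float32"],
}

# Assumed enumeration order; it is consistent with the job ids recorded in
# this diff (job 2 = batch_size=16 + float16, job 3 = batch_size=16 + float32).
for num, combo in enumerate(product(*params.values())):
    overrides = [f"{k}={v}" for k, v in zip(params, combo)]
    print(num, overrides)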
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=16 - - backend.torch_dtype=float16 - job: - name: experiment - chdir: true - override_dirname: backend.torch_dtype=float16,benchmark.input_shapes.batch_size=16 - id: '2' - num: 2 - config_name: llama2_1gpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/2 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/2/.config/overrides.yaml b/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/2/.config/overrides.yaml deleted file mode 100644 index 67f6580c5dd56716f1c23e4e90698198799b499f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/2/.config/overrides.yaml +++ /dev/null @@ -1,2 +0,0 @@ -- benchmark.input_shapes.batch_size=16 -- backend.torch_dtype=float16 diff --git a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/2/experiment.log b/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/2/experiment.log deleted file mode 100644 index 657191a824c3d2ba5a23a3850578bea3f2d5ab8a..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/2/experiment.log +++ /dev/null @@ -1,26 +0,0 @@ -[2023-09-20 18:56:17,017][inference][INFO] - `new_tokens` 
was set to 200. `max_new_tokens` and `min_new_tokens` will be set to 200. -[2023-09-20 18:56:17,978][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type llama -[2023-09-20 18:56:17,978][backend][INFO] - Configuring pytorch backend -[2023-09-20 18:56:17,978][backend][INFO] - + Checking initial device(s) isolation of CUDA device(s): [0] -[2023-09-20 18:56:18,101][backend][INFO] - + Checking continuous device(s) isolation of CUDA device(s): [0] -[2023-09-20 18:56:18,120][pytorch][INFO] - + Disabling gradients -[2023-09-20 18:56:18,121][pytorch][INFO] - + Loading model on device: cuda -[2023-09-20 18:56:18,526][pytorch][INFO] - + Turning on model's eval mode -[2023-09-20 18:56:18,527][benchmark][INFO] - Configuring inference benchmark -[2023-09-20 18:56:18,527][inference][INFO] - Running inference benchmark -[2023-09-20 18:56:18,528][input_generator][INFO] - Using llama model type generator -[2023-09-20 18:56:18,529][inference][INFO] - + Preparing input for the forward pass -[2023-09-20 18:56:18,529][inference][INFO] - + Warming up the forward pass -[2023-09-20 18:56:18,577][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-20 18:56:18,577][latency_tracker][INFO] - Tracked Pytorch devices: [0] -[2023-09-20 18:56:33,749][inference][INFO] - + Forward pass latency: 5.21e-03 (s) -[2023-09-20 18:56:33,751][inference][INFO] - + Forward pass throughput: 3070.00 (samples/s) -[2023-09-20 18:56:33,751][inference][INFO] - + Preparing input for the generation pass -[2023-09-20 18:56:33,751][inference][INFO] - + Warming up the generation pass -[2023-09-20 18:56:35,202][inference][INFO] - + Tracking generation latency and throughput -[2023-09-20 18:56:35,202][latency_tracker][INFO] - Tracked Pytorch devices: [0] -[2023-09-20 18:56:50,505][inference][INFO] - + Generation pass latency: 6.65e-01 (s) -[2023-09-20 18:56:50,505][inference][INFO] - + Generation pass throughput: 4810.00 (tokens/s) -[2023-09-20 18:56:50,505][inference][INFO] - Saving inference results -[2023-09-20 18:56:50,511][backend][INFO] - Cleaning pytorch backend -[2023-09-20 18:56:50,511][backend][INFO] - + Deleting pretrained model diff --git a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/2/hydra_config.yaml b/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/2/hydra_config.yaml deleted file mode 100644 index 46e7c942b1f98367f69538013206b4b487b0db82..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/2/hydra_config.yaml +++ /dev/null @@ -1,79 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float16 - disable_grad: true - eval_mode: true - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 15 - warmup_runs: 10 - memory: false - energy: false - input_shapes: - batch_size: 16 - sequence_length: 
200 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 - can_diffuse: false - can_generate: true - forward_kwargs: {} - generate_kwargs: - max_new_tokens: 200 - min_new_tokens: 200 - do_sample: false - use_cache: true - pad_token_id: 0 - num_beams: 1 -experiment_name: llama_1gpu_inference -model: fxmarty/tiny-llama-fast-tokenizer -device: cuda -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/2/inference_results.csv b/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/2/inference_results.csv deleted file mode 100644 index 69e1d0f5e1edb981aaa98b7dcfa7dbd8f44d8ea8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/2/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0.00521,3070.0,0.665,4810.0 diff --git a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/3/.config/config.yaml b/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/3/.config/config.yaml deleted file mode 100644 index 75a2cec740a09f8fed297594d9998531f5ae4881..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/3/.config/config.yaml +++ /dev/null @@ -1,73 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float32 - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 15 - warmup_runs: 10 - memory: false - energy: false - input_shapes: - batch_size: 16 - sequence_length: 200 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 - can_diffuse: ${can_diffuse:${task}} - can_generate: ${can_generate:${task}} - forward_kwargs: {} - generate_kwargs: {} -experiment_name: llama_1gpu_inference -model: fxmarty/tiny-llama-fast-tokenizer -device: cuda -task: ${infer_task:${model}} -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - 
accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/3/.config/hydra.yaml b/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/3/.config/hydra.yaml deleted file mode 100644 index a02350776c612fa28c69b00fe064e2759ff1d1f4..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/3/.config/hydra.yaml +++ /dev/null @@ -1,174 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,16 - backend.torch_dtype: float16,float32 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
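Note: the run/sweep directories in these hydra.yaml files are interpolated from the COMMIT_DATE_GMT and COMMIT_SHA environment variables plus the experiment name and job number. A sketch of the composition; the literal env values are read off the resolved output_dir paths recorded in the runtime sections of this diff (assumption: the environment carried exactly these strings):

import os

# Values inferred from the output_dir paths recorded in this diff.
os.environ.setdefault("COMMIT_DATE_GMT", "2023-09-20_16:51:56")
os.environ.setdefault("COMMIT_SHA", "e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d")

# Mirrors hydra.sweep.dir:
#   sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}
sweep_dir = "sweeps/{}_{}/{}".format(
    os.environ["COMMIT_DATE_GMT"],
    os.environ["COMMIT_SHA"],
    "llama_1gpu_inference",
)
print(sweep_dir + "/3")  # subdir = ${hydra.job.num} for this job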
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=16 - - backend.torch_dtype=float32 - job: - name: experiment - chdir: true - override_dirname: backend.torch_dtype=float32,benchmark.input_shapes.batch_size=16 - id: '3' - num: 3 - config_name: llama2_1gpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/3 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/3/.config/overrides.yaml b/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/3/.config/overrides.yaml deleted file mode 100644 index 9d69bb0ed87b5cb93e4c4e5d5b820de170746b79..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/3/.config/overrides.yaml +++ /dev/null @@ -1,2 +0,0 @@ -- benchmark.input_shapes.batch_size=16 -- backend.torch_dtype=float32 diff --git a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/3/experiment.log b/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/3/experiment.log deleted file mode 100644 index 9a3b9950fee8101d59f7c42ca91c4cce3a062a6c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/3/experiment.log +++ /dev/null @@ -1,26 +0,0 @@ -[2023-09-20 18:56:51,146][inference][INFO] - `new_tokens` 
was set to 200. `max_new_tokens` and `min_new_tokens` will be set to 200. -[2023-09-20 18:56:52,120][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type llama -[2023-09-20 18:56:52,120][backend][INFO] - Configuring pytorch backend -[2023-09-20 18:56:52,120][backend][INFO] - + Checking initial device(s) isolation of CUDA device(s): [0] -[2023-09-20 18:56:52,239][backend][INFO] - + Checking continuous device(s) isolation of CUDA device(s): [0] -[2023-09-20 18:56:52,257][pytorch][INFO] - + Disabling gradients -[2023-09-20 18:56:52,258][pytorch][INFO] - + Loading model on device: cuda -[2023-09-20 18:56:52,632][pytorch][INFO] - + Turning on model's eval mode -[2023-09-20 18:56:52,632][benchmark][INFO] - Configuring inference benchmark -[2023-09-20 18:56:52,633][inference][INFO] - Running inference benchmark -[2023-09-20 18:56:52,633][input_generator][INFO] - Using llama model type generator -[2023-09-20 18:56:52,633][inference][INFO] - + Preparing input for the forward pass -[2023-09-20 18:56:52,634][inference][INFO] - + Warming up the forward pass -[2023-09-20 18:56:52,655][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-20 18:56:52,656][latency_tracker][INFO] - Tracked Pytorch devices: [0] -[2023-09-20 18:57:07,977][inference][INFO] - + Forward pass latency: 3.93e-03 (s) -[2023-09-20 18:57:07,979][inference][INFO] - + Forward pass throughput: 4070.00 (samples/s) -[2023-09-20 18:57:07,979][inference][INFO] - + Preparing input for the generation pass -[2023-09-20 18:57:07,980][inference][INFO] - + Warming up the generation pass -[2023-09-20 18:57:08,599][inference][INFO] - + Tracking generation latency and throughput -[2023-09-20 18:57:08,599][latency_tracker][INFO] - Tracked Pytorch devices: [0] -[2023-09-20 18:57:24,114][inference][INFO] - + Generation pass latency: 5.54e-01 (s) -[2023-09-20 18:57:24,114][inference][INFO] - + Generation pass throughput: 5780.00 (tokens/s) -[2023-09-20 18:57:24,114][inference][INFO] - Saving inference results -[2023-09-20 18:57:24,120][backend][INFO] - Cleaning pytorch backend -[2023-09-20 18:57:24,121][backend][INFO] - + Deleting pretrained model diff --git a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/3/hydra_config.yaml b/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/3/hydra_config.yaml deleted file mode 100644 index 2404fd246618d0489ff3ba123302854d0830756a..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/3/hydra_config.yaml +++ /dev/null @@ -1,79 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float32 - disable_grad: true - eval_mode: true - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 15 - warmup_runs: 10 - memory: false - energy: false - input_shapes: - batch_size: 16 - sequence_length: 
200 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 - can_diffuse: false - can_generate: true - forward_kwargs: {} - generate_kwargs: - max_new_tokens: 200 - min_new_tokens: 200 - do_sample: false - use_cache: true - pad_token_id: 0 - num_beams: 1 -experiment_name: llama_1gpu_inference -model: fxmarty/tiny-llama-fast-tokenizer -device: cuda -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/3/inference_results.csv b/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/3/inference_results.csv deleted file mode 100644 index 7a1d8849659bca61dc6734cd5097a811c4ab8606..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/llama_1gpu_inference/3/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0.00393,4070.0,0.554,5780.0 diff --git a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/pytorch_bert_inference/0/.config/config.yaml b/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/pytorch_bert_inference/0/.config/config.yaml deleted file mode 100644 index b0c83089a2621f9950400d418a85b4376221df44..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/pytorch_bert_inference/0/.config/config.yaml +++ /dev/null @@ -1,73 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 5 - warmup_runs: 10 - memory: true - energy: false - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: null - can_diffuse: ${can_diffuse:${task}} - can_generate: ${can_generate:${task}} - forward_kwargs: {} - generate_kwargs: {} -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - 
accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/pytorch_bert_inference/0/.config/hydra.yaml b/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/pytorch_bert_inference/0/.config/hydra.yaml deleted file mode 100644 index 3be29cda8a1e5a5079d99bb5a71ee95bda531dec..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/pytorch_bert_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
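Note: the pre-resolution config.yaml files in this diff contain placeholders such as ${is_inference:${benchmark.name}} and ${infer_task:${model}}, while the matching hydra_config.yaml files carry the resolved values (true, text-classification, and so on). These are custom OmegaConf resolvers registered by optimum_benchmark; the resolver body below is a simplified stand-in to show the mechanism, not the library's actual implementation:

from omegaconf import OmegaConf

# Hypothetical resolver body; only the registration mechanism is real.
OmegaConf.register_new_resolver("is_inference", lambda name: name == "inference")

cfg = OmegaConf.create({
    "benchmark": {"name": "inference"},
    "backend": {"disable_grad": "${is_inference:${benchmark.name}}"},
})
print(cfg.backend.disable_grad)  # True, as recorded in hydra_config.yaml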
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - job: - name: experiment - chdir: true - override_dirname: benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/pytorch_bert_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/pytorch_bert_inference/0/.config/overrides.yaml b/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/pytorch_bert_inference/0/.config/overrides.yaml deleted file mode 100644 index 989520ffb456f7ab0fc88baccced91b0d27dd4c1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/pytorch_bert_inference/0/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=1 diff --git a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/pytorch_bert_inference/0/experiment.log b/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/pytorch_bert_inference/0/experiment.log deleted file mode 100644 index b68dc1fbc3d319b7a3aa5b864ae5e9e50e98547e..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/pytorch_bert_inference/0/experiment.log +++ /dev/null @@ -1,18 +0,0 @@ -[2023-09-20 18:54:27,045][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and 
model_type bert -[2023-09-20 18:54:27,045][backend][INFO] - Configuring pytorch backend -[2023-09-20 18:54:27,047][pytorch][INFO] - + Disabling gradients -[2023-09-20 18:54:27,048][pytorch][INFO] - + Loading model on device: cpu -[2023-09-20 18:54:28,177][pytorch][INFO] - + Turning on model's eval mode -[2023-09-20 18:54:28,184][benchmark][INFO] - Configuring inference benchmark -[2023-09-20 18:54:28,185][inference][INFO] - Running inference benchmark -[2023-09-20 18:54:28,185][input_generator][INFO] - Using bert model type generator -[2023-09-20 18:54:28,185][inference][INFO] - + Preparing input for the forward pass -[2023-09-20 18:54:28,185][inference][INFO] - + Warming up the forward pass -[2023-09-20 18:54:28,407][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-20 18:54:33,448][inference][INFO] - + Forward pass latency: 3.13e-03 (s) -[2023-09-20 18:54:33,449][inference][INFO] - + Forward pass throughput: 319.00 (samples/s) -[2023-09-20 18:54:33,449][inference][INFO] - + Tracking forward pass peak memory -[2023-09-20 18:54:33,485][inference][INFO] - + Forward pass peak memory: 551 (MB) -[2023-09-20 18:54:33,485][inference][INFO] - Saving inference results -[2023-09-20 18:54:33,491][backend][INFO] - Cleaning pytorch backend -[2023-09-20 18:54:33,491][backend][INFO] - + Deleting pretrained model diff --git a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index 8159f39871a74c9f18130d7533a9047d427de8e1..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,73 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: null - disable_grad: true - eval_mode: true - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 5 - warmup_runs: 10 - memory: true - energy: false - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: null - can_diffuse: false - can_generate: false - forward_kwargs: {} - generate_kwargs: {} -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git 
a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 1e00eb9800d5aaaa037528a69210bb14f7920790..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -forward.latency(s),forward.throughput(samples/s),forward.peak_memory(MB) -0.00313,319.0,551 diff --git a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/pytorch_bert_inference/1/.config/config.yaml b/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/pytorch_bert_inference/1/.config/config.yaml deleted file mode 100644 index cf6b2100f521049a9838a290716b3948d38db210..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/pytorch_bert_inference/1/.config/config.yaml +++ /dev/null @@ -1,73 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 5 - warmup_runs: 10 - memory: true - energy: false - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: null - can_diffuse: ${can_diffuse:${task}} - can_generate: ${can_generate:${task}} - forward_kwargs: {} - generate_kwargs: {} -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/pytorch_bert_inference/1/.config/hydra.yaml b/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/pytorch_bert_inference/1/.config/hydra.yaml deleted file mode 100644 index 7c0e64c80283b2f36238b4dbf7291d4c83096d40..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/pytorch_bert_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,172 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: 
sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,4 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=4 - job: - name: experiment - chdir: true - override_dirname: benchmark.input_shapes.batch_size=4 - id: '1' - num: 1 - config_name: bert_cpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/pytorch_bert_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git 
a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/pytorch_bert_inference/1/.config/overrides.yaml b/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/pytorch_bert_inference/1/.config/overrides.yaml deleted file mode 100644 index eef8c9ca24d6c4f5dd107f3227cbeef63ef5e1b9..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/pytorch_bert_inference/1/.config/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- benchmark.input_shapes.batch_size=4 diff --git a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/pytorch_bert_inference/1/experiment.log b/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/pytorch_bert_inference/1/experiment.log deleted file mode 100644 index 513d508bf2843fa42d230085b0d9b07f25030c03..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/pytorch_bert_inference/1/experiment.log +++ /dev/null @@ -1,18 +0,0 @@ -[2023-09-20 18:54:34,648][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-20 18:54:34,648][backend][INFO] - Configuring pytorch backend -[2023-09-20 18:54:34,649][pytorch][INFO] - + Disabling gradients -[2023-09-20 18:54:34,649][pytorch][INFO] - + Loading model on device: cpu -[2023-09-20 18:54:34,851][pytorch][INFO] - + Turning on model's eval mode -[2023-09-20 18:54:34,851][benchmark][INFO] - Configuring inference benchmark -[2023-09-20 18:54:34,852][inference][INFO] - Running inference benchmark -[2023-09-20 18:54:34,852][input_generator][INFO] - Using bert model type generator -[2023-09-20 18:54:34,852][inference][INFO] - + Preparing input for the forward pass -[2023-09-20 18:54:34,852][inference][INFO] - + Warming up the forward pass -[2023-09-20 18:54:34,960][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-20 18:54:39,996][inference][INFO] - + Forward pass latency: 3.55e-03 (s) -[2023-09-20 18:54:39,997][inference][INFO] - + Forward pass throughput: 1130.00 (samples/s) -[2023-09-20 18:54:39,997][inference][INFO] - + Tracking forward pass peak memory -[2023-09-20 18:54:40,109][inference][INFO] - + Forward pass peak memory: 554 (MB) -[2023-09-20 18:54:40,109][inference][INFO] - Saving inference results -[2023-09-20 18:54:40,113][backend][INFO] - Cleaning pytorch backend -[2023-09-20 18:54:40,113][backend][INFO] - + Deleting pretrained model diff --git a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/pytorch_bert_inference/1/hydra_config.yaml b/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/pytorch_bert_inference/1/hydra_config.yaml deleted file mode 100644 index 00d5f274372d051cb01e74f2a4b26b9ac2511e68..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/pytorch_bert_inference/1/hydra_config.yaml +++ /dev/null @@ -1,73 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: null - disable_grad: true - eval_mode: true - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - 
quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 5 - warmup_runs: 10 - memory: true - energy: false - input_shapes: - batch_size: 4 - sequence_length: 16 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: null - can_diffuse: false - can_generate: false - forward_kwargs: {} - generate_kwargs: {} -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/pytorch_bert_inference/1/inference_results.csv b/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/pytorch_bert_inference/1/inference_results.csv deleted file mode 100644 index aa0df54edbbbf95b0e2c0eb9a8ab7e9e0e39be02..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/pytorch_bert_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -forward.latency(s),forward.throughput(samples/s),forward.peak_memory(MB) -0.00355,1130.0,554 diff --git a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/pytorch_gpt2_inference/0/.config/config.yaml b/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/pytorch_gpt2_inference/0/.config/config.yaml deleted file mode 100644 index aa8ba5cebbb00543dea83d42e1a3242be877425e..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/pytorch_gpt2_inference/0/.config/config.yaml +++ /dev/null @@ -1,73 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 5 - warmup_runs: 10 - memory: true - energy: false - input_shapes: - batch_size: 2 - sequence_length: 16 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: null - can_diffuse: ${can_diffuse:${task}} - can_generate: ${can_generate:${task}} - forward_kwargs: {} - generate_kwargs: {} -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 
-device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/pytorch_gpt2_inference/0/.config/hydra.yaml b/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/pytorch_gpt2_inference/0/.config/hydra.yaml deleted file mode 100644 index e2062be3ff9fb679ec89c09e41cf7891a002ed38..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/pytorch_gpt2_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,170 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
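Note: every experiment.log in this diff reports its metrics in a fixed "+ <phase> pass <metric>: <value> (<unit>)" format, so the figures can be recovered even without the inference_results.csv files. A small parser sketch that relies only on the line format visible in the logs above:

import re

# Matches e.g. "+ Forward pass latency: 3.13e-03 (s)" from the logs above.
PATTERN = re.compile(
    r"\+ (Forward|Generation) pass (latency|throughput|peak memory): "
    r"([0-9.e+-]+) \((.+?)\)"
)

def parse_log(text: str) -> dict:
    metrics = {}
    for phase, metric, value, unit in PATTERN.findall(text):
        metrics[f"{phase.lower()}.{metric.replace(' ', '_')}"] = (float(value), unit)
    return metrics

sample = "[2023-09-20 18:54:33,448][inference][INFO] - + Forward pass latency: 3.13e-03 (s)"
print(parse_log(sample))  # {'forward.latency': (0.00313, 's')}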
-  hydra_logging:
-    version: 1
-    formatters:
-      colorlog:
-        (): colorlog.ColoredFormatter
-        format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
-    handlers:
-      console:
-        class: logging.StreamHandler
-        formatter: colorlog
-        stream: ext://sys.stdout
-    root:
-      level: INFO
-      handlers:
-      - console
-    disable_existing_loggers: false
-  job_logging:
-    version: 1
-    formatters:
-      simple:
-        format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
-      colorlog:
-        (): colorlog.ColoredFormatter
-        format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
-          - %(message)s'
-        log_colors:
-          DEBUG: purple
-          INFO: green
-          WARNING: yellow
-          ERROR: red
-          CRITICAL: red
-    handlers:
-      console:
-        class: logging.StreamHandler
-        formatter: colorlog
-        stream: ext://sys.stdout
-      file:
-        class: logging.FileHandler
-        formatter: simple
-        filename: ${hydra.job.name}.log
-    root:
-      level: INFO
-      handlers:
-      - console
-      - file
-    disable_existing_loggers: false
-  env: {}
-  mode: MULTIRUN
-  searchpath: []
-  callbacks: {}
-  output_subdir: .hydra
-  overrides:
-    hydra:
-    - hydra.mode=MULTIRUN
-    task: []
-  job:
-    name: experiment
-    chdir: true
-    override_dirname: ''
-    id: '0'
-    num: 0
-    config_name: gpt2_cpu_inference
-    env_set: {}
-    env_copy: []
-    config:
-      override_dirname:
-        kv_sep: '='
-        item_sep: ','
-        exclude_keys: []
-  runtime:
-    version: 1.3.2
-    version_base: '1.3'
-    cwd: /home/user/transformers-regression
-    config_sources:
-    - path: hydra.conf
-      schema: pkg
-      provider: hydra
-    - path: optimum_benchmark
-      schema: pkg
-      provider: main
-    - path: hydra_plugins.hydra_colorlog.conf
-      schema: pkg
-      provider: hydra-colorlog
-    - path: /home/user/transformers-regression/configs
-      schema: file
-      provider: command-line
-    - path: ''
-      schema: structured
-      provider: schema
-    output_dir: /home/user/transformers-regression/sweeps/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/pytorch_gpt2_inference/0
-    choices:
-      benchmark: inference
-      backend: pytorch
-      hydra/env: default
-      hydra/callbacks: null
-      hydra/job_logging: colorlog
-      hydra/hydra_logging: colorlog
-      hydra/hydra_help: default
-      hydra/help: default
-      hydra/sweeper: basic
-      hydra/launcher: basic
-      hydra/output: default
-  verbose: false
diff --git a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/pytorch_gpt2_inference/0/.config/overrides.yaml b/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/pytorch_gpt2_inference/0/.config/overrides.yaml
deleted file mode 100644
index fe51488c7066f6687ef680d6bfaa4f7768ef205c..0000000000000000000000000000000000000000
--- a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/pytorch_gpt2_inference/0/.config/overrides.yaml
+++ /dev/null
@@ -1 +0,0 @@
-[]
diff --git a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/pytorch_gpt2_inference/0/experiment.log b/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/pytorch_gpt2_inference/0/experiment.log
deleted file mode 100644
index 28625eb1660942dabbb3439e9521ae104970d6f4..0000000000000000000000000000000000000000
--- a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/pytorch_gpt2_inference/0/experiment.log
+++ /dev/null
@@ -1,25 +0,0 @@
-[2023-09-20 18:54:47,759][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2
-[2023-09-20 18:54:47,760][backend][INFO] - Configuring pytorch backend
-[2023-09-20 18:54:47,762][pytorch][INFO] - + Disabling gradients
-[2023-09-20 18:54:47,762][pytorch][INFO] - + Loading model on device: cpu
-[2023-09-20 18:54:49,191][pytorch][INFO] - + Turning on model's eval mode
-[2023-09-20 18:54:49,195][benchmark][INFO] - Configuring inference benchmark
-[2023-09-20 18:54:49,195][inference][INFO] - Running inference benchmark
-[2023-09-20 18:54:49,195][input_generator][INFO] - Using gpt2 model type generator
-[2023-09-20 18:54:49,195][inference][INFO] - + Preparing input for the forward pass
-[2023-09-20 18:54:49,196][inference][INFO] - + Warming up the forward pass
-[2023-09-20 18:54:49,281][inference][INFO] - + Tracking forward pass latency and throughput
-[2023-09-20 18:54:54,323][inference][INFO] - + Forward pass latency: 3.19e-03 (s)
-[2023-09-20 18:54:54,324][inference][INFO] - + Forward pass throughput: 627.00 (samples/s)
-[2023-09-20 18:54:54,324][inference][INFO] - + Tracking forward pass peak memory
-[2023-09-20 18:54:54,397][inference][INFO] - + Forward pass peak memory: 555 (MB)
-[2023-09-20 18:54:54,398][inference][INFO] - + Preparing input for the generation pass
-[2023-09-20 18:54:54,398][inference][INFO] - + Warming up the generation pass
-[2023-09-20 18:54:54,702][inference][INFO] - + Tracking generation latency and throughput
-[2023-09-20 18:54:59,789][inference][INFO] - + Generation pass latency: 2.99e-01 (s)
-[2023-09-20 18:54:59,790][inference][INFO] - + Generation pass throughput: 669.00 (tokens/s)
-[2023-09-20 18:54:59,790][inference][INFO] - + Tracking generation pass peak memory
-[2023-09-20 18:55:00,139][inference][INFO] - + Generation pass peak memory: 560 (MB)
-[2023-09-20 18:55:00,140][inference][INFO] - Saving inference results
-[2023-09-20 18:55:00,146][backend][INFO] - Cleaning pytorch backend
-[2023-09-20 18:55:00,146][backend][INFO] - + Deleting pretrained model
diff --git a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/pytorch_gpt2_inference/0/hydra_config.yaml
deleted file mode 100644
index f46fa028eedfeac3a6b4c27f9b2b7fe54502d316..0000000000000000000000000000000000000000
--- a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/pytorch_gpt2_inference/0/hydra_config.yaml
+++ /dev/null
@@ -1,79 +0,0 @@
-backend:
-  name: pytorch
-  version: 2.1.0+rocm5.6
-  _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
-  seed: 42
-  inter_op_num_threads: null
-  intra_op_num_threads: null
-  initial_isolation_check: true
-  continous_isolation_check: true
-  delete_cache: false
-  no_weights: false
-  device_map: null
-  torch_dtype: null
-  disable_grad: true
-  eval_mode: true
-  amp_autocast: false
-  amp_dtype: null
-  torch_compile: false
-  torch_compile_config: {}
-  bettertransformer: false
-  quantization_scheme: null
-  quantization_config: {}
-  use_ddp: false
-  ddp_config: {}
-  peft_strategy: null
-  peft_config: {}
-benchmark:
-  name: inference
-  _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark
-  duration: 5
-  warmup_runs: 10
-  memory: true
-  energy: false
-  input_shapes:
-    batch_size: 2
-    sequence_length: 16
-    num_choices: 1
-    feature_size: 80
-    nb_max_frames: 3000
-    audio_sequence_length: 16000
-  new_tokens: null
-  can_diffuse: false
-  can_generate: true
-  forward_kwargs: {}
-  generate_kwargs:
-    max_new_tokens: 100
-    min_new_tokens: 100
-    do_sample: false
-    use_cache: true
-    pad_token_id: 0
-    num_beams: 1
-experiment_name: pytorch_gpt2_inference
-model: hf-internal-testing/tiny-random-gpt2
-device: cpu
-task: text-generation
-hub_kwargs:
-  revision: main
-  cache_dir: null
-  force_download: false
-  local_files_only: false
-environment:
-  optimum_version: 1.13.1
-  transformers_version: 4.34.0.dev0
-  accelerate_version: 0.23.0
-  diffusers_version: null
-  python_version: 3.10.12
-  system: Linux
-  cpu: ' AMD EPYC 7643 48-Core Processor'
-  cpu_count: 96
-  cpu_ram_mb: 1082028
-  gpus:
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
diff --git a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/pytorch_gpt2_inference/0/inference_results.csv
deleted file mode 100644
index 45c0120ae93933afc8909592446a78cf395e28ce..0000000000000000000000000000000000000000
--- a/raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/pytorch_gpt2_inference/0/inference_results.csv
+++ /dev/null
@@ -1,2 +0,0 @@
-forward.latency(s),forward.throughput(samples/s),forward.peak_memory(MB),generate.latency(s),generate.throughput(tokens/s),generate.peak_memory(MB)
-0.00319,627.0,555,0.299,669.0,560
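The deleted inference_results.csv above is a one-row table whose columns follow a `<pass>.<metric>(<unit>)` naming scheme. As a minimal sketch of how such an artifact could be inspected after restoring it from git history (pandas and loading the file directly are assumptions for illustration, not part of the original pipeline):

```python
# Minimal sketch: load one deleted inference_results.csv artifact and print its
# metrics. Assumes the file has been restored from git history and that pandas
# is installed; the path is taken from the diff above.
import pandas as pd

path = (
    "raw_results/2023-09-20_16:51:56_e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/"
    "pytorch_gpt2_inference/0/inference_results.csv"
)
df = pd.read_csv(path)

# The file holds a single row; columns follow the "<pass>.<metric>(<unit>)" scheme.
for column, value in df.iloc[0].items():
    print(f"{column}: {value}")

# Spot-check against the values logged in experiment.log for the same run.
assert df["forward.latency(s)"].iloc[0] == 0.00319
assert df["generate.throughput(tokens/s)"].iloc[0] == 669.0
```

The single row mirrors the values in experiment.log (forward latency 3.19e-03 s, generation throughput 669.00 tokens/s), so the CSV serves as a machine-readable summary of each run for downstream aggregation.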